aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--contrib/compiler-rt/include/xray/xray_interface.h91
-rw-r--r--contrib/compiler-rt/lib/asan/asan_interceptors.cc10
-rw-r--r--contrib/compiler-rt/lib/asan/asan_interceptors.h6
-rw-r--r--contrib/compiler-rt/lib/builtins/emutls.c2
-rw-r--r--contrib/compiler-rt/lib/builtins/int_types.h4
-rw-r--r--contrib/compiler-rt/lib/cfi/cfi_blacklist.txt5
-rw-r--r--contrib/compiler-rt/lib/scudo/scudo_allocator.cpp9
-rw-r--r--contrib/compiler-rt/lib/scudo/scudo_allocator.h8
-rw-r--r--contrib/compiler-rt/lib/scudo/scudo_tls.h15
-rw-r--r--contrib/compiler-rt/lib/scudo/scudo_tls_android.cpp95
-rw-r--r--contrib/compiler-rt/lib/scudo/scudo_tls_android.inc44
-rw-r--r--contrib/compiler-rt/lib/scudo/scudo_tls_context_android.inc54
-rw-r--r--contrib/compiler-rt/lib/scudo/scudo_tls_context_linux.inc29
-rw-r--r--contrib/compiler-rt/lib/scudo/scudo_tls_linux.cpp10
-rw-r--r--contrib/compiler-rt/lib/scudo/scudo_tls_linux.inc (renamed from contrib/compiler-rt/lib/scudo/scudo_tls_linux.h)16
-rw-r--r--contrib/compiler-rt/lib/ubsan/ubsan_diag_standalone.cc37
-rw-r--r--contrib/compiler-rt/lib/ubsan/ubsan_handlers.cc3
-rw-r--r--contrib/compiler-rt/lib/xray/xray_init.cc4
-rw-r--r--contrib/compiler-rt/lib/xray/xray_interface.cc141
-rw-r--r--contrib/compiler-rt/lib/xray/xray_interface_internal.h7
-rw-r--r--contrib/libc++/include/__config82
-rw-r--r--contrib/libc++/include/__locale1
-rw-r--r--contrib/libc++/include/__threading_support5
-rw-r--r--contrib/libc++/include/exception59
-rw-r--r--contrib/libc++/include/experimental/dynarray2
-rw-r--r--contrib/libc++/include/experimental/optional9
-rw-r--r--contrib/libc++/include/functional6
-rw-r--r--contrib/libc++/include/future50
-rw-r--r--contrib/libc++/include/istream2
-rw-r--r--contrib/libc++/include/locale22
-rw-r--r--contrib/libc++/include/memory15
-rw-r--r--contrib/libc++/include/new16
-rw-r--r--contrib/libc++/include/ostream2
-rw-r--r--contrib/libc++/include/random22
-rw-r--r--contrib/libc++/include/shared_mutex8
-rw-r--r--contrib/libc++/include/streambuf2
-rw-r--r--contrib/libc++/include/typeinfo1
-rw-r--r--contrib/libc++/src/exception.cpp2
-rw-r--r--contrib/libc++/src/experimental/filesystem/operations.cpp4
-rw-r--r--contrib/libc++/src/locale.cpp12
-rw-r--r--contrib/libc++/src/memory.cpp6
-rw-r--r--contrib/libc++/src/support/runtime/exception_pointer_msvc.ipp94
-rw-r--r--contrib/llvm/include/llvm/ADT/APInt.h11
-rw-r--r--contrib/llvm/include/llvm/ADT/BitVector.h33
-rw-r--r--contrib/llvm/include/llvm/ADT/SmallBitVector.h18
-rw-r--r--contrib/llvm/include/llvm/Analysis/LoopInfoImpl.h16
-rw-r--r--contrib/llvm/include/llvm/Analysis/ProfileSummaryInfo.h12
-rw-r--r--contrib/llvm/include/llvm/Analysis/ScalarEvolution.h12
-rw-r--r--contrib/llvm/include/llvm/Analysis/TargetLibraryInfo.def3
-rw-r--r--contrib/llvm/include/llvm/CodeGen/AsmPrinter.h3
-rw-r--r--contrib/llvm/include/llvm/CodeGen/FastISel.h1
-rw-r--r--contrib/llvm/include/llvm/CodeGen/FunctionLoweringInfo.h2
-rw-r--r--contrib/llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h2
-rw-r--r--contrib/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h6
-rw-r--r--contrib/llvm/include/llvm/CodeGen/GlobalISel/RegBankSelect.h10
-rw-r--r--contrib/llvm/include/llvm/CodeGen/GlobalISel/RegisterBankInfo.h54
-rw-r--r--contrib/llvm/include/llvm/CodeGen/MIRPrinter.h (renamed from contrib/llvm/lib/CodeGen/MIRPrinter.h)13
-rw-r--r--contrib/llvm/include/llvm/CodeGen/MachineFrameInfo.h8
-rw-r--r--contrib/llvm/include/llvm/CodeGen/MachineModuleInfo.h2
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/CodeView/TypeDatabase.h2
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFContext.h5
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFFormValue.h331
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/Native/DbiModuleDescriptor.h8
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/Native/DbiModuleList.h114
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/Native/DbiStream.h13
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeCompilandSymbol.h4
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeEnumModules.h8
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/Native/RawTypes.h11
-rw-r--r--contrib/llvm/include/llvm/DebugInfo/PDB/Native/TpiStream.h6
-rw-r--r--contrib/llvm/include/llvm/ExecutionEngine/Orc/RPCSerialization.h4
-rw-r--r--contrib/llvm/include/llvm/ExecutionEngine/RuntimeDyldChecker.h6
-rw-r--r--contrib/llvm/include/llvm/IR/Attributes.h15
-rw-r--r--contrib/llvm/include/llvm/IR/BasicBlock.h13
-rw-r--r--contrib/llvm/include/llvm/IR/CFG.h54
-rw-r--r--contrib/llvm/include/llvm/IR/CallSite.h4
-rw-r--r--contrib/llvm/include/llvm/IR/CallingConv.h12
-rw-r--r--contrib/llvm/include/llvm/IR/ConstantRange.h7
-rw-r--r--contrib/llvm/include/llvm/IR/DataLayout.h45
-rw-r--r--contrib/llvm/include/llvm/IR/DebugInfo.h25
-rw-r--r--contrib/llvm/include/llvm/IR/Dominators.h14
-rw-r--r--contrib/llvm/include/llvm/IR/Function.h1
-rw-r--r--contrib/llvm/include/llvm/IR/InlineAsm.h6
-rw-r--r--contrib/llvm/include/llvm/IR/InstIterator.h34
-rw-r--r--contrib/llvm/include/llvm/IR/InstrTypes.h46
-rw-r--r--contrib/llvm/include/llvm/IR/Intrinsics.td8
-rw-r--r--contrib/llvm/include/llvm/IR/IntrinsicsARM.td186
-rw-r--r--contrib/llvm/include/llvm/IR/ModuleSummaryIndex.h152
-rw-r--r--contrib/llvm/include/llvm/IR/ModuleSummaryIndexYAML.h4
-rw-r--r--contrib/llvm/include/llvm/MC/ConstantPools.h3
-rw-r--r--contrib/llvm/include/llvm/Object/COFF.h40
-rw-r--r--contrib/llvm/include/llvm/Object/Wasm.h6
-rw-r--r--contrib/llvm/include/llvm/ObjectYAML/WasmYAML.h15
-rw-r--r--contrib/llvm/include/llvm/Support/AArch64TargetParser.def36
-rw-r--r--contrib/llvm/include/llvm/Support/BinaryStreamArray.h15
-rw-r--r--contrib/llvm/include/llvm/Support/COFF.h44
-rw-r--r--contrib/llvm/include/llvm/Support/KnownBits.h79
-rw-r--r--contrib/llvm/include/llvm/Support/MathExtras.h12
-rw-r--r--contrib/llvm/include/llvm/Target/GlobalISel/SelectionDAGCompat.td38
-rw-r--r--contrib/llvm/include/llvm/Target/Target.td10
-rw-r--r--contrib/llvm/include/llvm/Target/TargetOpcodes.def4
-rw-r--r--contrib/llvm/include/llvm/Transforms/Instrumentation.h1
-rw-r--r--contrib/llvm/include/llvm/Transforms/Scalar/Float2Int.h2
-rw-r--r--contrib/llvm/lib/Analysis/ConstantFolding.cpp9
-rw-r--r--contrib/llvm/lib/Analysis/InstructionSimplify.cpp361
-rw-r--r--contrib/llvm/lib/Analysis/LazyValueInfo.cpp14
-rw-r--r--contrib/llvm/lib/Analysis/Lint.cpp4
-rw-r--r--contrib/llvm/lib/Analysis/ModuleSummaryAnalysis.cpp43
-rw-r--r--contrib/llvm/lib/Analysis/ScalarEvolution.cpp128
-rw-r--r--contrib/llvm/lib/Analysis/TargetLibraryInfo.cpp4
-rw-r--r--contrib/llvm/lib/Analysis/ValueTracking.cpp79
-rw-r--r--contrib/llvm/lib/Bitcode/Reader/BitcodeReader.cpp77
-rw-r--r--contrib/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp22
-rw-r--r--contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp52
-rw-r--r--contrib/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp12
-rw-r--r--contrib/llvm/lib/CodeGen/BranchFolding.cpp34
-rw-r--r--contrib/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp11
-rw-r--r--contrib/llvm/lib/CodeGen/GlobalISel/InstructionSelect.cpp7
-rw-r--r--contrib/llvm/lib/CodeGen/GlobalISel/Legalizer.cpp9
-rw-r--r--contrib/llvm/lib/CodeGen/GlobalISel/RegBankSelect.cpp28
-rw-r--r--contrib/llvm/lib/CodeGen/GlobalISel/RegisterBankInfo.cpp68
-rw-r--r--contrib/llvm/lib/CodeGen/MIRParser/MIParser.cpp42
-rw-r--r--contrib/llvm/lib/CodeGen/MIRPrinter.cpp73
-rw-r--r--contrib/llvm/lib/CodeGen/MIRPrintingPass.cpp3
-rw-r--r--contrib/llvm/lib/CodeGen/MachineFrameInfo.cpp26
-rw-r--r--contrib/llvm/lib/CodeGen/MachineVerifier.cpp8
-rw-r--r--contrib/llvm/lib/CodeGen/PrologEpilogInserter.cpp3
-rw-r--r--contrib/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp21
-rw-r--r--contrib/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp22
-rw-r--r--contrib/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp3
-rw-r--r--contrib/llvm/lib/CodeGen/SelectionDAG/ResourcePriorityQueue.cpp67
-rw-r--r--contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp89
-rw-r--r--contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp57
-rw-r--r--contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp62
-rw-r--r--contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp88
-rw-r--r--contrib/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp49
-rw-r--r--contrib/llvm/lib/CodeGen/XRayInstrumentation.cpp46
-rw-r--r--contrib/llvm/lib/DebugInfo/CodeView/TypeDatabase.cpp5
-rw-r--r--contrib/llvm/lib/DebugInfo/DWARF/DWARFContext.cpp39
-rw-r--r--contrib/llvm/lib/DebugInfo/DWARF/DWARFFormValue.cpp581
-rw-r--r--contrib/llvm/lib/DebugInfo/PDB/Native/DbiModuleList.cpp273
-rw-r--r--contrib/llvm/lib/DebugInfo/PDB/Native/DbiStream.cpp112
-rw-r--r--contrib/llvm/lib/DebugInfo/PDB/Native/NativeCompilandSymbol.cpp8
-rw-r--r--contrib/llvm/lib/DebugInfo/PDB/Native/NativeEnumModules.cpp15
-rw-r--r--contrib/llvm/lib/DebugInfo/PDB/Native/NativeExeSymbol.cpp2
-rw-r--r--contrib/llvm/lib/DebugInfo/PDB/Native/TpiStream.cpp26
-rw-r--r--contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldChecker.cpp14
-rw-r--r--contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCheckerImpl.h2
-rw-r--r--contrib/llvm/lib/IR/ConstantRange.cpp29
-rw-r--r--contrib/llvm/lib/IR/DataLayout.cpp27
-rw-r--r--contrib/llvm/lib/IR/DebugInfo.cpp39
-rw-r--r--contrib/llvm/lib/IR/Instruction.cpp43
-rw-r--r--contrib/llvm/lib/IR/ModuleSummaryIndex.cpp12
-rw-r--r--contrib/llvm/lib/LTO/LTO.cpp9
-rw-r--r--contrib/llvm/lib/LTO/ThinLTOCodeGenerator.cpp5
-rw-r--r--contrib/llvm/lib/MC/ConstantPools.cpp11
-rw-r--r--contrib/llvm/lib/MC/MCParser/AsmParser.cpp42
-rw-r--r--contrib/llvm/lib/Object/COFFObjectFile.cpp45
-rw-r--r--contrib/llvm/lib/Object/WasmObjectFile.cpp68
-rw-r--r--contrib/llvm/lib/ObjectYAML/WasmYAML.cpp12
-rw-r--r--contrib/llvm/lib/Passes/PassBuilder.cpp4
-rw-r--r--contrib/llvm/lib/Support/APInt.cpp194
-rw-r--r--contrib/llvm/lib/Support/TargetParser.cpp6
-rw-r--r--contrib/llvm/lib/Support/Unix/DynamicLibrary.inc2
-rw-r--r--contrib/llvm/lib/Support/Unix/Path.inc9
-rw-r--r--contrib/llvm/lib/Target/AArch64/AArch64.h2
-rw-r--r--contrib/llvm/lib/Target/AArch64/AArch64.td1
-rw-r--r--contrib/llvm/lib/Target/AArch64/AArch64AddressTypePromotion.cpp493
-rw-r--r--contrib/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp6
-rw-r--r--contrib/llvm/lib/Target/AArch64/AArch64InstrInfo.td56
-rw-r--r--contrib/llvm/lib/Target/AArch64/AArch64RegisterBankInfo.cpp60
-rw-r--r--contrib/llvm/lib/Target/AArch64/AArch64RegisterBankInfo.h7
-rw-r--r--contrib/llvm/lib/Target/AArch64/AArch64TargetMachine.cpp9
-rw-r--r--contrib/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp3
-rw-r--r--contrib/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp2
-rw-r--r--contrib/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp91
-rw-r--r--contrib/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.h5
-rw-r--r--contrib/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp3
-rw-r--r--contrib/llvm/lib/Target/AMDGPU/SIISelLowering.cpp14
-rw-r--r--contrib/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp10
-rw-r--r--contrib/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp8
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp15
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMISelLowering.cpp53
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMISelLowering.h6
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMInstrInfo.td215
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMInstrNEON.td26
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMInstrThumb2.td233
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMRegisterBankInfo.cpp14
-rw-r--r--contrib/llvm/lib/Target/ARM/ARMRegisterBankInfo.h3
-rw-r--r--contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMTargetStreamer.cpp1
-rw-r--r--contrib/llvm/lib/Target/BPF/MCTargetDesc/BPFAsmBackend.cpp2
-rw-r--r--contrib/llvm/lib/Target/Hexagon/Disassembler/HexagonDisassembler.cpp2
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonDepIICHVX.td1143
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonDepIICScalar.td2504
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonDepITypes.h56
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonDepITypes.td56
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonDepInstrFormats.td5327
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonDepInstrInfo.td6429
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonDepTimingClasses.h132
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonIICHVX.td100
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonIICScalar.td164
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonInstrFormats.td164
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonInstrFormatsV4.td63
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonInstrFormatsV60.td180
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp152
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonInstrInfo.h15
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonMachineScheduler.cpp2
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonPatterns.td8
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonPseudo.td272
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonRegisterInfo.td16
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonSchedule.td51
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonScheduleV4.td213
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonScheduleV55.td207
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonScheduleV60.td253
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonScheduleV62.td112
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonSubtarget.cpp302
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonSubtarget.h10
-rw-r--r--contrib/llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp110
-rw-r--r--contrib/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonBaseInfo.h2
-rw-r--r--contrib/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCCodeEmitter.cpp8
-rw-r--r--contrib/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonShuffler.cpp9
-rw-r--r--contrib/llvm/lib/Target/Hexagon/RDFLiveness.cpp33
-rw-r--r--contrib/llvm/lib/Target/Hexagon/RDFRegisters.cpp15
-rw-r--r--contrib/llvm/lib/Target/Hexagon/RDFRegisters.h1
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsAsmPrinter.cpp35
-rw-r--r--contrib/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp9323
-rw-r--r--contrib/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td6329
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp3
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCISelLowering.cpp2
-rw-r--r--contrib/llvm/lib/Target/Sparc/SparcISelLowering.cpp2
-rw-r--r--contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp8
-rw-r--r--contrib/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp94
-rw-r--r--contrib/llvm/lib/Target/X86/AsmParser/X86Operand.h14
-rw-r--r--contrib/llvm/lib/Target/X86/X86AsmPrinter.h1
-rw-r--r--contrib/llvm/lib/Target/X86/X86FrameLowering.cpp4
-rw-r--r--contrib/llvm/lib/Target/X86/X86ISelLowering.cpp273
-rw-r--r--contrib/llvm/lib/Target/X86/X86InstrAVX512.td39
-rw-r--r--contrib/llvm/lib/Target/X86/X86InstrInfo.td6
-rw-r--r--contrib/llvm/lib/Target/X86/X86InstrSSE.td9
-rw-r--r--contrib/llvm/lib/Target/X86/X86InstructionSelector.cpp30
-rw-r--r--contrib/llvm/lib/Target/X86/X86LegalizerInfo.cpp102
-rw-r--r--contrib/llvm/lib/Target/X86/X86LegalizerInfo.h5
-rw-r--r--contrib/llvm/lib/Target/X86/X86MCInstLower.cpp80
-rw-r--r--contrib/llvm/lib/Target/X86/X86OptimizeLEAs.cpp8
-rw-r--r--contrib/llvm/lib/Target/X86/X86RegisterBankInfo.cpp23
-rw-r--r--contrib/llvm/lib/Target/X86/X86RegisterBankInfo.h7
-rw-r--r--contrib/llvm/lib/Target/X86/X86Subtarget.cpp10
-rw-r--r--contrib/llvm/lib/Target/X86/X86TargetTransformInfo.cpp32
-rw-r--r--contrib/llvm/lib/Target/XCore/XCoreISelLowering.cpp2
-rw-r--r--contrib/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp2
-rw-r--r--contrib/llvm/lib/Transforms/IPO/FunctionImport.cpp97
-rw-r--r--contrib/llvm/lib/Transforms/IPO/LowerTypeTests.cpp2
-rw-r--r--contrib/llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp2
-rw-r--r--contrib/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp76
-rw-r--r--contrib/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp57
-rw-r--r--contrib/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp2
-rw-r--r--contrib/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp2
-rw-r--r--contrib/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp21
-rw-r--r--contrib/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp8
-rw-r--r--contrib/llvm/lib/Transforms/Instrumentation/IndirectCallPromotion.cpp4
-rw-r--r--contrib/llvm/lib/Transforms/Instrumentation/SanitizerCoverage.cpp11
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/Float2Int.cpp10
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/JumpThreading.cpp32
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp10
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/NewGVN.cpp9
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp15
-rw-r--r--contrib/llvm/lib/Transforms/Utils/BuildLibCalls.cpp1
-rw-r--r--contrib/llvm/lib/Transforms/Utils/SimplifyCFG.cpp2
-rw-r--r--contrib/llvm/lib/Transforms/Utils/ValueMapper.cpp17
-rw-r--r--contrib/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp2
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/ODRHash.h2
-rw-r--r--contrib/llvm/tools/clang/include/clang/ASTMatchers/ASTMatchers.h14
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/Attr.td7
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/AttrDocs.td15
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/BuiltinsARM.def84
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/BuiltinsX86.def6
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/BuiltinsX86_64.def2
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/DiagnosticLexKinds.td18
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/DiagnosticSemaKinds.td21
-rw-r--r--contrib/llvm/tools/clang/include/clang/Driver/CC1Options.td3
-rw-r--r--contrib/llvm/tools/clang/include/clang/Driver/Distro.h5
-rw-r--r--contrib/llvm/tools/clang/include/clang/Driver/Multilib.h9
-rw-r--r--contrib/llvm/tools/clang/include/clang/Driver/Options.td4
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/CodeGenOptions.def1
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/FrontendAction.h2
-rw-r--r--contrib/llvm/tools/clang/include/clang/Lex/HeaderSearch.h12
-rw-r--r--contrib/llvm/tools/clang/include/clang/Lex/Lexer.h5
-rw-r--r--contrib/llvm/tools/clang/include/clang/Lex/ModuleMap.h10
-rw-r--r--contrib/llvm/tools/clang/include/clang/Lex/Preprocessor.h31
-rw-r--r--contrib/llvm/tools/clang/include/clang/Sema/Sema.h3
-rw-r--r--contrib/llvm/tools/clang/include/clang/Tooling/FixIt.h7
-rw-r--r--contrib/llvm/tools/clang/lib/ASTMatchers/Dynamic/Registry.cpp1
-rw-r--r--contrib/llvm/tools/clang/lib/Basic/Diagnostic.cpp8
-rw-r--r--contrib/llvm/tools/clang/lib/Basic/DiagnosticIDs.cpp4
-rw-r--r--contrib/llvm/tools/clang/lib/Basic/Targets.cpp9
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/BackendUtil.cpp9
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.cpp8
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGBuiltin.cpp2
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.cpp32
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.h12
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.cpp13
-rw-r--r--contrib/llvm/tools/clang/lib/Driver/Distro.cpp1
-rw-r--r--contrib/llvm/tools/clang/lib/Driver/Multilib.cpp8
-rw-r--r--contrib/llvm/tools/clang/lib/Driver/SanitizerArgs.cpp5
-rw-r--r--contrib/llvm/tools/clang/lib/Driver/ToolChains/CrossWindows.cpp2
-rw-r--r--contrib/llvm/tools/clang/lib/Driver/ToolChains/Gnu.cpp22
-rw-r--r--contrib/llvm/tools/clang/lib/Format/FormatToken.h2
-rw-r--r--contrib/llvm/tools/clang/lib/Format/FormatTokenLexer.cpp10
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/CompilerInvocation.cpp1
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/FrontendAction.cpp227
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/FrontendActions.cpp12
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/PrintPreprocessedOutput.cpp37
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/Rewrite/FrontendActions.cpp12
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/Rewrite/InclusionRewriter.cpp30
-rw-r--r--contrib/llvm/tools/clang/lib/Headers/arm_acle.h318
-rw-r--r--contrib/llvm/tools/clang/lib/Headers/lwpintrin.h150
-rw-r--r--contrib/llvm/tools/clang/lib/Headers/x86intrin.h4
-rw-r--r--contrib/llvm/tools/clang/lib/Index/IndexDecl.cpp4
-rw-r--r--contrib/llvm/tools/clang/lib/Lex/HeaderSearch.cpp10
-rw-r--r--contrib/llvm/tools/clang/lib/Lex/Lexer.cpp66
-rw-r--r--contrib/llvm/tools/clang/lib/Lex/ModuleMap.cpp58
-rw-r--r--contrib/llvm/tools/clang/lib/Lex/PPDirectives.cpp8
-rw-r--r--contrib/llvm/tools/clang/lib/Lex/PPLexerChange.cpp57
-rw-r--r--contrib/llvm/tools/clang/lib/Lex/PPMacroExpansion.cpp2
-rw-r--r--contrib/llvm/tools/clang/lib/Lex/Pragma.cpp167
-rw-r--r--contrib/llvm/tools/clang/lib/Lex/Preprocessor.cpp8
-rw-r--r--contrib/llvm/tools/clang/lib/Lex/TokenLexer.cpp30
-rw-r--r--contrib/llvm/tools/clang/lib/Parse/ParseDeclCXX.cpp2
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/Sema.cpp14
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaChecking.cpp21
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaDecl.cpp43
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaDeclAttr.cpp180
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaDeclObjC.cpp4
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaExpr.cpp6
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaStmt.cpp2
-rw-r--r--contrib/llvm/tools/clang/lib/Tooling/JSONCompilationDatabase.cpp8
-rw-r--r--contrib/llvm/tools/lld/CMakeLists.txt1
-rw-r--r--contrib/llvm/tools/lld/COFF/Chunks.h2
-rw-r--r--contrib/llvm/tools/lld/COFF/ICF.cpp42
-rw-r--r--contrib/llvm/tools/lld/COFF/PDB.cpp4
-rw-r--r--contrib/llvm/tools/lld/ELF/Config.h2
-rw-r--r--contrib/llvm/tools/lld/ELF/Driver.cpp38
-rw-r--r--contrib/llvm/tools/lld/ELF/InputFiles.cpp21
-rw-r--r--contrib/llvm/tools/lld/ELF/InputFiles.h10
-rw-r--r--contrib/llvm/tools/lld/ELF/LinkerScript.cpp132
-rw-r--r--contrib/llvm/tools/lld/ELF/LinkerScript.h6
-rw-r--r--contrib/llvm/tools/lld/ELF/Options.td1
-rw-r--r--contrib/llvm/tools/lld/ELF/OutputSections.cpp24
-rw-r--r--contrib/llvm/tools/lld/ELF/Relocations.cpp16
-rw-r--r--contrib/llvm/tools/lld/ELF/SymbolTable.cpp9
-rw-r--r--contrib/llvm/tools/lld/ELF/Symbols.cpp7
-rw-r--r--contrib/llvm/tools/lld/ELF/SyntheticSections.cpp20
-rw-r--r--contrib/llvm/tools/lld/ELF/Target.cpp42
-rw-r--r--contrib/llvm/tools/lld/ELF/Target.h2
-rw-r--r--contrib/llvm/tools/lld/ELF/Writer.cpp89
-rw-r--r--contrib/llvm/tools/lld/ELF/Writer.h3
-rw-r--r--contrib/llvm/tools/lld/include/lld/Core/Parallel.h279
-rw-r--r--contrib/llvm/tools/lld/include/lld/Core/TaskGroup.h65
-rw-r--r--contrib/llvm/tools/lld/include/lld/Support/Memory.h63
-rw-r--r--contrib/llvm/tools/lld/lib/Core/CMakeLists.txt4
-rw-r--r--contrib/llvm/tools/lld/lib/Core/TaskGroup.cpp141
-rw-r--r--contrib/llvm/tools/lldb/include/lldb/API/SBAddress.h4
-rw-r--r--contrib/llvm/tools/lldb/include/lldb/API/SBInstruction.h2
-rw-r--r--contrib/llvm/tools/lldb/include/lldb/API/SBInstructionList.h9
-rw-r--r--contrib/llvm/tools/lldb/include/lldb/Core/Disassembler.h2
-rw-r--r--contrib/llvm/tools/lldb/include/lldb/Expression/Expression.h10
-rw-r--r--contrib/llvm/tools/lldb/include/lldb/Host/MainLoop.h7
-rw-r--r--contrib/llvm/tools/lldb/include/lldb/Host/common/UDPSocket.h4
-rw-r--r--contrib/llvm/tools/lldb/include/lldb/Target/ThreadPlanCallFunction.h2
-rw-r--r--contrib/llvm/tools/lldb/include/lldb/Target/ThreadPlanCallUserExpression.h3
-rw-r--r--contrib/llvm/tools/lldb/include/lldb/Utility/TaskPool.h108
-rw-r--r--contrib/llvm/tools/lldb/source/API/SBAddress.cpp6
-rw-r--r--contrib/llvm/tools/lldb/source/API/SBInstruction.cpp7
-rw-r--r--contrib/llvm/tools/lldb/source/API/SBInstructionList.cpp26
-rw-r--r--contrib/llvm/tools/lldb/source/API/SBProcess.cpp16
-rw-r--r--contrib/llvm/tools/lldb/source/Core/Disassembler.cpp4
-rw-r--r--contrib/llvm/tools/lldb/source/Host/common/Editline.cpp2
-rw-r--r--contrib/llvm/tools/lldb/source/Host/common/MainLoop.cpp206
-rw-r--r--contrib/llvm/tools/lldb/source/Host/common/UDPSocket.cpp82
-rw-r--r--contrib/llvm/tools/lldb/source/Plugins/ABI/SysV-arm64/ABISysV_arm64.cpp34
-rw-r--r--contrib/llvm/tools/lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARF.cpp73
-rw-r--r--contrib/llvm/tools/lldb/source/Target/ThreadPlanCallUserExpression.cpp11
-rw-r--r--contrib/llvm/tools/lldb/source/Utility/TaskPool.cpp23
-rw-r--r--contrib/llvm/tools/llvm-link/llvm-link.cpp2
-rw-r--r--contrib/llvm/tools/llvm-lto/llvm-lto.cpp2
-rw-r--r--contrib/llvm/tools/llvm-pdbdump/Analyze.cpp2
-rw-r--r--contrib/llvm/tools/llvm-pdbdump/LLVMOutputStyle.cpp172
-rw-r--r--contrib/llvm/tools/llvm-pdbdump/LLVMOutputStyle.h7
-rw-r--r--contrib/llvm/tools/llvm-pdbdump/StreamUtil.cpp13
-rw-r--r--contrib/llvm/tools/llvm-pdbdump/YAMLOutputStyle.cpp19
-rw-r--r--contrib/llvm/tools/llvm-readobj/COFFDumper.cpp111
-rw-r--r--contrib/llvm/tools/llvm-rtdyld/llvm-rtdyld.cpp23
-rw-r--r--lib/clang/include/clang/Basic/Version.inc2
-rw-r--r--lib/clang/include/lld/Config/Version.inc2
-rw-r--r--lib/clang/include/llvm/Support/VCSRevision.h2
-rw-r--r--lib/clang/libllvm/Makefile2
-rw-r--r--usr.bin/clang/lld/Makefile1
396 files changed, 25618 insertions, 20484 deletions
diff --git a/contrib/compiler-rt/include/xray/xray_interface.h b/contrib/compiler-rt/include/xray/xray_interface.h
index 52a7e1d9e944..c90025e38aae 100644
--- a/contrib/compiler-rt/include/xray/xray_interface.h
+++ b/contrib/compiler-rt/include/xray/xray_interface.h
@@ -15,10 +15,11 @@
#define XRAY_XRAY_INTERFACE_H
#include <cstdint>
+#include <stddef.h>
extern "C" {
-// Synchronize this with AsmPrinter::SledKind in LLVM.
+/// Synchronize this with AsmPrinter::SledKind in LLVM.
enum XRayEntryType {
ENTRY = 0,
EXIT = 1,
@@ -26,32 +27,43 @@ enum XRayEntryType {
LOG_ARGS_ENTRY = 3,
};
-// Provide a function to invoke for when instrumentation points are hit. This is
-// a user-visible control surface that overrides the default implementation. The
-// function provided should take the following arguments:
-//
-// - function id: an identifier that indicates the id of a function; this id
-// is generated by xray; the mapping between the function id
-// and the actual function pointer is available through
-// __xray_table.
-// - entry type: identifies what kind of instrumentation point was encountered
-// (function entry, function exit, etc.). See the enum
-// XRayEntryType for more details.
-//
-// The user handler must handle correctly spurious calls after this handler is
-// removed or replaced with another handler, because it would be too costly for
-// XRay runtime to avoid spurious calls.
-// To prevent circular calling, the handler function itself and all its
-// direct&indirect callees must not be instrumented with XRay, which can be
-// achieved by marking them all with: __attribute__((xray_never_instrument))
-//
-// Returns 1 on success, 0 on error.
+/// Provide a function to invoke for when instrumentation points are hit. This
+/// is a user-visible control surface that overrides the default implementation.
+/// The function provided should take the following arguments:
+///
+/// - function id: an identifier that indicates the id of a function; this id
+/// is generated by xray; the mapping between the function id
+/// and the actual function pointer is available through
+/// __xray_table.
+/// - entry type: identifies what kind of instrumentation point was
+/// encountered (function entry, function exit, etc.). See the
+/// enum XRayEntryType for more details.
+///
+/// The user handler must handle correctly spurious calls after this handler is
+/// removed or replaced with another handler, because it would be too costly for
+/// XRay runtime to avoid spurious calls.
+/// To prevent circular calling, the handler function itself and all its
+/// direct&indirect callees must not be instrumented with XRay, which can be
+/// achieved by marking them all with: __attribute__((xray_never_instrument))
+///
+/// Returns 1 on success, 0 on error.
extern int __xray_set_handler(void (*entry)(int32_t, XRayEntryType));
-// This removes whatever the currently provided handler is. Returns 1 on
-// success, 0 on error.
+/// This removes whatever the currently provided handler is. Returns 1 on
+/// success, 0 on error.
extern int __xray_remove_handler();
+/// Use XRay to log the first argument of each (instrumented) function call.
+/// When this function exits, all threads will have observed the effect and
+/// start logging their subsequent affected function calls (if patched).
+///
+/// Returns 1 on success, 0 on error.
+extern int __xray_set_handler_arg1(void (*)(int32_t, XRayEntryType, uint64_t));
+
+/// Disables the XRay handler used to log first arguments of function calls.
+/// Returns 1 on success, 0 on error.
+extern int __xray_remove_handler_arg1();
+
enum XRayPatchingStatus {
NOT_INITIALIZED = 0,
SUCCESS = 1,
@@ -59,24 +71,31 @@ enum XRayPatchingStatus {
FAILED = 3,
};
-// This tells XRay to patch the instrumentation points. See XRayPatchingStatus
-// for possible result values.
+/// This tells XRay to patch the instrumentation points. See XRayPatchingStatus
+/// for possible result values.
extern XRayPatchingStatus __xray_patch();
-// Reverses the effect of __xray_patch(). See XRayPatchingStatus for possible
-// result values.
+/// Reverses the effect of __xray_patch(). See XRayPatchingStatus for possible
+/// result values.
extern XRayPatchingStatus __xray_unpatch();
-// Use XRay to log the first argument of each (instrumented) function call.
-// When this function exits, all threads will have observed the effect and
-// start logging their subsequent affected function calls (if patched).
-//
-// Returns 1 on success, 0 on error.
-extern int __xray_set_handler_arg1(void (*)(int32_t, XRayEntryType, uint64_t));
+/// This patches a specific function id. See XRayPatchingStatus for possible
+/// result values.
+extern XRayPatchingStatus __xray_patch_function(int32_t FuncId);
+
+/// This unpatches a specific function id. See XRayPatchingStatus for possible
+/// result values.
+extern XRayPatchingStatus __xray_unpatch_function(int32_t FuncId);
+
+/// This function returns the address of the function provided a valid function
+/// id. We return 0 if we encounter any error, even if 0 may be a valid function
+/// address.
+extern uintptr_t __xray_function_address(int32_t FuncId);
+
+/// This function returns the maximum valid function id. Returns 0 if we
+/// encounter errors (when there are no instrumented functions, etc.).
+extern size_t __xray_max_function_id();
-// Disables the XRay handler used to log first arguments of function calls.
-// Returns 1 on success, 0 on error.
-extern int __xray_remove_handler_arg1();
}
#endif
diff --git a/contrib/compiler-rt/lib/asan/asan_interceptors.cc b/contrib/compiler-rt/lib/asan/asan_interceptors.cc
index 905dd2e23870..c6969c979a59 100644
--- a/contrib/compiler-rt/lib/asan/asan_interceptors.cc
+++ b/contrib/compiler-rt/lib/asan/asan_interceptors.cc
@@ -443,6 +443,13 @@ INTERCEPTOR(void, _longjmp, void *env, int val) {
}
#endif
+#if ASAN_INTERCEPT___LONGJMP_CHK
+INTERCEPTOR(void, __longjmp_chk, void *env, int val) {
+ __asan_handle_no_return();
+ REAL(__longjmp_chk)(env, val);
+}
+#endif
+
#if ASAN_INTERCEPT_SIGLONGJMP
INTERCEPTOR(void, siglongjmp, void *env, int val) {
__asan_handle_no_return();
@@ -758,6 +765,9 @@ void InitializeAsanInterceptors() {
#if ASAN_INTERCEPT__LONGJMP
ASAN_INTERCEPT_FUNC(_longjmp);
#endif
+#if ASAN_INTERCEPT___LONGJMP_CHK
+ ASAN_INTERCEPT_FUNC(__longjmp_chk);
+#endif
#if ASAN_INTERCEPT_SIGLONGJMP
ASAN_INTERCEPT_FUNC(siglongjmp);
#endif
diff --git a/contrib/compiler-rt/lib/asan/asan_interceptors.h b/contrib/compiler-rt/lib/asan/asan_interceptors.h
index d747c31a5d0f..93fca4f67366 100644
--- a/contrib/compiler-rt/lib/asan/asan_interceptors.h
+++ b/contrib/compiler-rt/lib/asan/asan_interceptors.h
@@ -58,6 +58,12 @@
# define ASAN_INTERCEPT_SIGLONGJMP 0
#endif
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+# define ASAN_INTERCEPT___LONGJMP_CHK 1
+#else
+# define ASAN_INTERCEPT___LONGJMP_CHK 0
+#endif
+
// Android bug: https://code.google.com/p/android/issues/detail?id=61799
#if ASAN_HAS_EXCEPTIONS && !SANITIZER_WINDOWS && \
!(SANITIZER_ANDROID && defined(__i386))
diff --git a/contrib/compiler-rt/lib/builtins/emutls.c b/contrib/compiler-rt/lib/builtins/emutls.c
index e8d5ddb22011..12aad3a42b76 100644
--- a/contrib/compiler-rt/lib/builtins/emutls.c
+++ b/contrib/compiler-rt/lib/builtins/emutls.c
@@ -98,7 +98,7 @@ static __inline emutls_address_array* emutls_getspecific() {
#else
-#include <Windows.h>
+#include <windows.h>
#include <malloc.h>
#include <stdio.h>
#include <assert.h>
diff --git a/contrib/compiler-rt/lib/builtins/int_types.h b/contrib/compiler-rt/lib/builtins/int_types.h
index 660385ecd6ae..a92238c5b730 100644
--- a/contrib/compiler-rt/lib/builtins/int_types.h
+++ b/contrib/compiler-rt/lib/builtins/int_types.h
@@ -60,9 +60,7 @@ typedef union
}s;
} udwords;
-/* MIPS64 issue: PR 20098 */
-#if (defined(__LP64__) || defined(__wasm__)) && \
- !(defined(__mips__) && defined(__clang__))
+#if (defined(__LP64__) || defined(__wasm__) || defined(__mips64))
#define CRT_HAS_128BIT
#endif
diff --git a/contrib/compiler-rt/lib/cfi/cfi_blacklist.txt b/contrib/compiler-rt/lib/cfi/cfi_blacklist.txt
index 1f0eeb355617..cc111be8120e 100644
--- a/contrib/compiler-rt/lib/cfi/cfi_blacklist.txt
+++ b/contrib/compiler-rt/lib/cfi/cfi_blacklist.txt
@@ -24,3 +24,8 @@ fun:_ZNSt3__19addressof*
# Windows C++ stdlib headers that contain bad unrelated casts.
src:*xmemory0
src:*xstddef
+
+# std::_Sp_counted_ptr_inplace::_Sp_counted_ptr_inplace() (libstdc++).
+# This ctor is used by std::make_shared and needs to cast to uninitialized T*
+# in order to call std::allocator_traits<T>::construct.
+fun:_ZNSt23_Sp_counted_ptr_inplace*
diff --git a/contrib/compiler-rt/lib/scudo/scudo_allocator.cpp b/contrib/compiler-rt/lib/scudo/scudo_allocator.cpp
index 2ccdcd903dad..5420fc9649ca 100644
--- a/contrib/compiler-rt/lib/scudo/scudo_allocator.cpp
+++ b/contrib/compiler-rt/lib/scudo/scudo_allocator.cpp
@@ -368,11 +368,12 @@ struct ScudoAllocator {
void *Ptr;
uptr Salt;
uptr AllocationAlignment = FromPrimary ? MinAlignment : Alignment;
- ScudoThreadContext *ThreadContext = getThreadContext();
+ ScudoThreadContext *ThreadContext = getThreadContextAndLock();
if (LIKELY(ThreadContext)) {
Salt = getPrng(ThreadContext)->getNext();
Ptr = BackendAllocator.Allocate(getAllocatorCache(ThreadContext),
NeededSize, AllocationAlignment);
+ ThreadContext->unlock();
} else {
SpinMutexLock l(&FallbackMutex);
Salt = FallbackPrng.getNext();
@@ -434,9 +435,10 @@ struct ScudoAllocator {
if (BypassQuarantine) {
Chunk->eraseHeader();
void *Ptr = Chunk->getAllocBeg(Header);
- ScudoThreadContext *ThreadContext = getThreadContext();
+ ScudoThreadContext *ThreadContext = getThreadContextAndLock();
if (LIKELY(ThreadContext)) {
getBackendAllocator().Deallocate(getAllocatorCache(ThreadContext), Ptr);
+ ThreadContext->unlock();
} else {
SpinMutexLock Lock(&FallbackMutex);
getBackendAllocator().Deallocate(&FallbackAllocatorCache, Ptr);
@@ -445,12 +447,13 @@ struct ScudoAllocator {
UnpackedHeader NewHeader = *Header;
NewHeader.State = ChunkQuarantine;
Chunk->compareExchangeHeader(&NewHeader, Header);
- ScudoThreadContext *ThreadContext = getThreadContext();
+ ScudoThreadContext *ThreadContext = getThreadContextAndLock();
if (LIKELY(ThreadContext)) {
AllocatorQuarantine.Put(getQuarantineCache(ThreadContext),
QuarantineCallback(
getAllocatorCache(ThreadContext)),
Chunk, Size);
+ ThreadContext->unlock();
} else {
SpinMutexLock l(&FallbackMutex);
AllocatorQuarantine.Put(&FallbackQuarantineCache,
diff --git a/contrib/compiler-rt/lib/scudo/scudo_allocator.h b/contrib/compiler-rt/lib/scudo/scudo_allocator.h
index 2cac2de71cb0..f159deffb1d5 100644
--- a/contrib/compiler-rt/lib/scudo/scudo_allocator.h
+++ b/contrib/compiler-rt/lib/scudo/scudo_allocator.h
@@ -72,7 +72,13 @@ const uptr AlignedChunkHeaderSize =
#if SANITIZER_CAN_USE_ALLOCATOR64
const uptr AllocatorSpace = ~0ULL;
-const uptr AllocatorSize = 0x40000000000ULL; // 4TB.
+# if defined(__aarch64__) && SANITIZER_ANDROID
+const uptr AllocatorSize = 0x4000000000ULL; // 256G.
+# elif defined(__aarch64__)
+const uptr AllocatorSize = 0x10000000000ULL; // 1T.
+# else
+const uptr AllocatorSize = 0x40000000000ULL; // 4T.
+# endif
typedef DefaultSizeClassMap SizeClassMap;
struct AP {
static const uptr kSpaceBeg = AllocatorSpace;
diff --git a/contrib/compiler-rt/lib/scudo/scudo_tls.h b/contrib/compiler-rt/lib/scudo/scudo_tls.h
index 0d7d1bffd0b6..f6039bebec44 100644
--- a/contrib/compiler-rt/lib/scudo/scudo_tls.h
+++ b/contrib/compiler-rt/lib/scudo/scudo_tls.h
@@ -19,10 +19,16 @@
#include "scudo_allocator.h"
#include "scudo_utils.h"
+#include "sanitizer_common/sanitizer_linux.h"
+#include "sanitizer_common/sanitizer_platform.h"
+
namespace __scudo {
-struct ALIGNED(64) ScudoThreadContext {
- public:
+// Platform specific base thread context definitions.
+#include "scudo_tls_context_android.inc"
+#include "scudo_tls_context_linux.inc"
+
+struct ALIGNED(64) ScudoThreadContext : public ScudoThreadContextPlatform {
AllocatorCache Cache;
Xorshift128Plus Prng;
uptr QuarantineCachePlaceHolder[4];
@@ -32,8 +38,9 @@ struct ALIGNED(64) ScudoThreadContext {
void initThread();
-// Fastpath functions are defined in the following platform specific headers.
-#include "scudo_tls_linux.h"
+// Platform specific fastpath function definitions.
+#include "scudo_tls_android.inc"
+#include "scudo_tls_linux.inc"
} // namespace __scudo
diff --git a/contrib/compiler-rt/lib/scudo/scudo_tls_android.cpp b/contrib/compiler-rt/lib/scudo/scudo_tls_android.cpp
new file mode 100644
index 000000000000..0e3602b2faf0
--- /dev/null
+++ b/contrib/compiler-rt/lib/scudo/scudo_tls_android.cpp
@@ -0,0 +1,95 @@
+//===-- scudo_tls_android.cpp -----------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// Scudo thread local structure implementation for Android.
+///
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+
+#if SANITIZER_LINUX && SANITIZER_ANDROID
+
+#include "scudo_tls.h"
+
+#include <pthread.h>
+
+namespace __scudo {
+
+static pthread_once_t GlobalInitialized = PTHREAD_ONCE_INIT;
+static pthread_key_t PThreadKey;
+
+static atomic_uint32_t ThreadContextCurrentIndex;
+static ScudoThreadContext *ThreadContexts;
+static uptr NumberOfContexts;
+
+// sysconf(_SC_NPROCESSORS_{CONF,ONLN}) cannot be used as they allocate memory.
+static uptr getNumberOfCPUs() {
+ cpu_set_t CPUs;
+ CHECK_EQ(sched_getaffinity(0, sizeof(cpu_set_t), &CPUs), 0);
+ return CPU_COUNT(&CPUs);
+}
+
+static void initOnce() {
+ // Hack: TLS_SLOT_TSAN was introduced in N. To be able to use it on M for
+ // testing, we create an unused key. Since the key_data array follows the tls
+ // array, it basically gives us the extra entry we need.
+ // TODO(kostyak): remove and restrict to N and above.
+ CHECK_EQ(pthread_key_create(&PThreadKey, NULL), 0);
+ initScudo();
+ NumberOfContexts = getNumberOfCPUs();
+ ThreadContexts = reinterpret_cast<ScudoThreadContext *>(
+ MmapOrDie(sizeof(ScudoThreadContext) * NumberOfContexts, __func__));
+ for (int i = 0; i < NumberOfContexts; i++)
+ ThreadContexts[i].init();
+}
+
+void initThread() {
+ pthread_once(&GlobalInitialized, initOnce);
+ // Initial context assignment is done in a plain round-robin fashion.
+ u32 Index = atomic_fetch_add(&ThreadContextCurrentIndex, 1,
+ memory_order_relaxed);
+ ScudoThreadContext *ThreadContext =
+ &ThreadContexts[Index % NumberOfContexts];
+ *get_android_tls_ptr() = reinterpret_cast<uptr>(ThreadContext);
+}
+
+ScudoThreadContext *getThreadContextAndLockSlow() {
+ ScudoThreadContext *ThreadContext;
+ // Go through all the contexts and find the first unlocked one.
+ for (u32 i = 0; i < NumberOfContexts; i++) {
+ ThreadContext = &ThreadContexts[i];
+ if (ThreadContext->tryLock()) {
+ *get_android_tls_ptr() = reinterpret_cast<uptr>(ThreadContext);
+ return ThreadContext;
+ }
+ }
+ // No luck, find the one with the lowest precedence, and slow lock it.
+ u64 Precedence = UINT64_MAX;
+ for (u32 i = 0; i < NumberOfContexts; i++) {
+ u64 SlowLockPrecedence = ThreadContexts[i].getSlowLockPrecedence();
+ if (SlowLockPrecedence && SlowLockPrecedence < Precedence) {
+ ThreadContext = &ThreadContexts[i];
+ Precedence = SlowLockPrecedence;
+ }
+ }
+ if (LIKELY(Precedence != UINT64_MAX)) {
+ ThreadContext->lock();
+ *get_android_tls_ptr() = reinterpret_cast<uptr>(ThreadContext);
+ return ThreadContext;
+ }
+ // Last resort (can this happen?), stick with the current one.
+ ThreadContext =
+ reinterpret_cast<ScudoThreadContext *>(*get_android_tls_ptr());
+ ThreadContext->lock();
+ return ThreadContext;
+}
+
+} // namespace __scudo
+
+#endif // SANITIZER_LINUX && SANITIZER_ANDROID
diff --git a/contrib/compiler-rt/lib/scudo/scudo_tls_android.inc b/contrib/compiler-rt/lib/scudo/scudo_tls_android.inc
new file mode 100644
index 000000000000..8ecad7a30a6c
--- /dev/null
+++ b/contrib/compiler-rt/lib/scudo/scudo_tls_android.inc
@@ -0,0 +1,44 @@
+//===-- scudo_tls_android.inc -----------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// Scudo thread local structure fastpath functions implementation for Android.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_TLS_ANDROID_H_
+#define SCUDO_TLS_ANDROID_H_
+
+#ifndef SCUDO_TLS_H_
+# error "This file must be included inside scudo_tls.h."
+#endif // SCUDO_TLS_H_
+
+#if SANITIZER_LINUX && SANITIZER_ANDROID
+
+ALWAYS_INLINE void initThreadMaybe() {
+ if (LIKELY(*get_android_tls_ptr()))
+ return;
+ initThread();
+}
+
+ScudoThreadContext *getThreadContextAndLockSlow();
+
+ALWAYS_INLINE ScudoThreadContext *getThreadContextAndLock() {
+ ScudoThreadContext *ThreadContext =
+ reinterpret_cast<ScudoThreadContext *>(*get_android_tls_ptr());
+ CHECK(ThreadContext);
+ // Try to lock the currently associated context.
+ if (ThreadContext->tryLock())
+ return ThreadContext;
+ // If it failed, go the slow path.
+ return getThreadContextAndLockSlow();
+}
+
+#endif // SANITIZER_LINUX && SANITIZER_ANDROID
+
+#endif // SCUDO_TLS_ANDROID_H_
diff --git a/contrib/compiler-rt/lib/scudo/scudo_tls_context_android.inc b/contrib/compiler-rt/lib/scudo/scudo_tls_context_android.inc
new file mode 100644
index 000000000000..f1951319d487
--- /dev/null
+++ b/contrib/compiler-rt/lib/scudo/scudo_tls_context_android.inc
@@ -0,0 +1,54 @@
+//===-- scudo_tls_context_android.inc ---------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// Android specific base thread context definition.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_TLS_CONTEXT_ANDROID_INC_
+#define SCUDO_TLS_CONTEXT_ANDROID_INC_
+
+#ifndef SCUDO_TLS_H_
+# error "This file must be included inside scudo_tls.h."
+#endif // SCUDO_TLS_H_
+
+#if SANITIZER_LINUX && SANITIZER_ANDROID
+
+struct ScudoThreadContextPlatform {
+ INLINE bool tryLock() {
+ if (Mutex.TryLock()) {
+ atomic_store_relaxed(&SlowLockPrecedence, 0);
+ return true;
+ }
+ if (atomic_load_relaxed(&SlowLockPrecedence) == 0)
+ atomic_store_relaxed(&SlowLockPrecedence, NanoTime());
+ return false;
+ }
+
+ INLINE void lock() {
+ Mutex.Lock();
+ atomic_store_relaxed(&SlowLockPrecedence, 0);
+ }
+
+ INLINE void unlock() {
+ Mutex.Unlock();
+ }
+
+ INLINE u64 getSlowLockPrecedence() {
+ return atomic_load_relaxed(&SlowLockPrecedence);
+ }
+
+ private:
+ StaticSpinMutex Mutex;
+ atomic_uint64_t SlowLockPrecedence;
+};
+
+#endif // SANITIZER_LINUX && SANITIZER_ANDROID
+
+#endif // SCUDO_TLS_CONTEXT_ANDROID_INC_
diff --git a/contrib/compiler-rt/lib/scudo/scudo_tls_context_linux.inc b/contrib/compiler-rt/lib/scudo/scudo_tls_context_linux.inc
new file mode 100644
index 000000000000..8d292bdbc932
--- /dev/null
+++ b/contrib/compiler-rt/lib/scudo/scudo_tls_context_linux.inc
@@ -0,0 +1,29 @@
+//===-- scudo_tls_context_linux.inc -----------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// Linux specific base thread context definition.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_TLS_CONTEXT_LINUX_INC_
+#define SCUDO_TLS_CONTEXT_LINUX_INC_
+
+#ifndef SCUDO_TLS_H_
+# error "This file must be included inside scudo_tls.h."
+#endif // SCUDO_TLS_H_
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+
+struct ScudoThreadContextPlatform {
+ ALWAYS_INLINE void unlock() {}
+};
+
+#endif // SANITIZER_LINUX && !SANITIZER_ANDROID
+
+#endif // SCUDO_TLS_CONTEXT_LINUX_INC_
diff --git a/contrib/compiler-rt/lib/scudo/scudo_tls_linux.cpp b/contrib/compiler-rt/lib/scudo/scudo_tls_linux.cpp
index 3453367f8a53..5a9cc998bccf 100644
--- a/contrib/compiler-rt/lib/scudo/scudo_tls_linux.cpp
+++ b/contrib/compiler-rt/lib/scudo/scudo_tls_linux.cpp
@@ -14,7 +14,7 @@
#include "sanitizer_common/sanitizer_platform.h"
-#if SANITIZER_LINUX
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
#include "scudo_tls.h"
@@ -26,8 +26,10 @@ namespace __scudo {
static pthread_once_t GlobalInitialized = PTHREAD_ONCE_INIT;
static pthread_key_t PThreadKey;
-thread_local ThreadState ScudoThreadState = ThreadNotInitialized;
-thread_local ScudoThreadContext ThreadLocalContext;
+__attribute__((tls_model("initial-exec")))
+THREADLOCAL ThreadState ScudoThreadState = ThreadNotInitialized;
+__attribute__((tls_model("initial-exec")))
+THREADLOCAL ScudoThreadContext ThreadLocalContext;
static void teardownThread(void *Ptr) {
uptr Iteration = reinterpret_cast<uptr>(Ptr);
@@ -59,4 +61,4 @@ void initThread() {
} // namespace __scudo
-#endif // SANITIZER_LINUX
+#endif // SANITIZER_LINUX && !SANITIZER_ANDROID
diff --git a/contrib/compiler-rt/lib/scudo/scudo_tls_linux.h b/contrib/compiler-rt/lib/scudo/scudo_tls_linux.inc
index 0994f2d7b24d..242ee3329ea8 100644
--- a/contrib/compiler-rt/lib/scudo/scudo_tls_linux.h
+++ b/contrib/compiler-rt/lib/scudo/scudo_tls_linux.inc
@@ -1,4 +1,4 @@
-//===-- scudo_tls_linux.h ---------------------------------------*- C++ -*-===//
+//===-- scudo_tls_linux.inc -------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -19,17 +19,17 @@
# error "This file must be included inside scudo_tls.h."
#endif // SCUDO_TLS_H_
-#include "sanitizer_common/sanitizer_platform.h"
-
-#if SANITIZER_LINUX
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
enum ThreadState : u8 {
ThreadNotInitialized = 0,
ThreadInitialized,
ThreadTornDown,
};
-extern thread_local ThreadState ScudoThreadState;
-extern thread_local ScudoThreadContext ThreadLocalContext;
+__attribute__((tls_model("initial-exec")))
+extern THREADLOCAL ThreadState ScudoThreadState;
+__attribute__((tls_model("initial-exec")))
+extern THREADLOCAL ScudoThreadContext ThreadLocalContext;
ALWAYS_INLINE void initThreadMaybe() {
if (LIKELY(ScudoThreadState != ThreadNotInitialized))
@@ -37,12 +37,12 @@ ALWAYS_INLINE void initThreadMaybe() {
initThread();
}
-ALWAYS_INLINE ScudoThreadContext *getThreadContext() {
+ALWAYS_INLINE ScudoThreadContext *getThreadContextAndLock() {
if (UNLIKELY(ScudoThreadState == ThreadTornDown))
return nullptr;
return &ThreadLocalContext;
}
-#endif // SANITIZER_LINUX
+#endif // SANITIZER_LINUX && !SANITIZER_ANDROID
#endif // SCUDO_TLS_LINUX_H_
diff --git a/contrib/compiler-rt/lib/ubsan/ubsan_diag_standalone.cc b/contrib/compiler-rt/lib/ubsan/ubsan_diag_standalone.cc
new file mode 100644
index 000000000000..df8ed5fcdf6d
--- /dev/null
+++ b/contrib/compiler-rt/lib/ubsan/ubsan_diag_standalone.cc
@@ -0,0 +1,37 @@
+//===-- ubsan_diag_standalone.cc ------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Diagnostic reporting for the standalone UBSan runtime.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ubsan_platform.h"
+#if CAN_SANITIZE_UB
+#include "ubsan_diag.h"
+
+using namespace __ubsan;
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_print_stack_trace() {
+ uptr top = 0;
+ uptr bottom = 0;
+ bool request_fast_unwind = common_flags()->fast_unwind_on_fatal;
+ if (request_fast_unwind)
+ __sanitizer::GetThreadStackTopAndBottom(false, &top, &bottom);
+
+ GET_REPORT_OPTIONS(false);
+ BufferedStackTrace stack;
+ stack.Unwind(kStackTraceMax, Opts.pc, Opts.bp, nullptr, top, bottom,
+ request_fast_unwind);
+ stack.Print();
+}
+} // extern "C"
+
+#endif // CAN_SANITIZE_UB
diff --git a/contrib/compiler-rt/lib/ubsan/ubsan_handlers.cc b/contrib/compiler-rt/lib/ubsan/ubsan_handlers.cc
index de13ab893bec..d6a8f52a27b4 100644
--- a/contrib/compiler-rt/lib/ubsan/ubsan_handlers.cc
+++ b/contrib/compiler-rt/lib/ubsan/ubsan_handlers.cc
@@ -410,7 +410,8 @@ static void handleLoadInvalidValue(InvalidValueData *Data, ValueHandle Val,
SourceLocation Loc = Data->Loc.acquire();
// This check could be more precise if we used different handlers for
// -fsanitize=bool and -fsanitize=enum.
- bool IsBool = (0 == internal_strcmp(Data->Type.getTypeName(), "'bool'"));
+ bool IsBool = (0 == internal_strcmp(Data->Type.getTypeName(), "'bool'")) ||
+ (0 == internal_strncmp(Data->Type.getTypeName(), "'BOOL'", 6));
ErrorType ET =
IsBool ? ErrorType::InvalidBoolLoad : ErrorType::InvalidEnumLoad;
diff --git a/contrib/compiler-rt/lib/xray/xray_init.cc b/contrib/compiler-rt/lib/xray/xray_init.cc
index 6f558d656147..aa660baa9920 100644
--- a/contrib/compiler-rt/lib/xray/xray_init.cc
+++ b/contrib/compiler-rt/lib/xray/xray_init.cc
@@ -25,6 +25,8 @@ extern "C" {
void __xray_init();
extern const XRaySledEntry __start_xray_instr_map[] __attribute__((weak));
extern const XRaySledEntry __stop_xray_instr_map[] __attribute__((weak));
+extern const XRayFunctionSledIndex __start_xray_fn_idx[] __attribute__((weak));
+extern const XRayFunctionSledIndex __stop_xray_fn_idx[] __attribute__((weak));
}
using namespace __xray;
@@ -55,6 +57,8 @@ void __xray_init() XRAY_NEVER_INSTRUMENT {
__sanitizer::SpinMutexLock Guard(&XRayInstrMapMutex);
XRayInstrMap.Sleds = __start_xray_instr_map;
XRayInstrMap.Entries = __stop_xray_instr_map - __start_xray_instr_map;
+ XRayInstrMap.SledsIndex = __start_xray_fn_idx;
+ XRayInstrMap.Functions = __stop_xray_fn_idx - __start_xray_fn_idx;
}
__sanitizer::atomic_store(&XRayInitialized, true,
__sanitizer::memory_order_release);
diff --git a/contrib/compiler-rt/lib/xray/xray_interface.cc b/contrib/compiler-rt/lib/xray/xray_interface.cc
index 26ec161fe860..26f0ab122db2 100644
--- a/contrib/compiler-rt/lib/xray/xray_interface.cc
+++ b/contrib/compiler-rt/lib/xray/xray_interface.cc
@@ -132,12 +132,48 @@ CleanupInvoker<Function> scopeCleanup(Function Fn) XRAY_NEVER_INSTRUMENT {
return CleanupInvoker<Function>{Fn};
}
+inline bool patchSled(const XRaySledEntry &Sled, bool Enable,
+ int32_t FuncId) XRAY_NEVER_INSTRUMENT {
+ // While we're here, we should patch the nop sled. To do that we mprotect
+ // the page containing the function to be writeable.
+ const uint64_t PageSize = GetPageSizeCached();
+ void *PageAlignedAddr =
+ reinterpret_cast<void *>(Sled.Address & ~(PageSize - 1));
+ std::size_t MProtectLen = (Sled.Address + cSledLength) -
+ reinterpret_cast<uint64_t>(PageAlignedAddr);
+ MProtectHelper Protector(PageAlignedAddr, MProtectLen);
+ if (Protector.MakeWriteable() == -1) {
+ printf("Failed mprotect: %d\n", errno);
+ return XRayPatchingStatus::FAILED;
+ }
+
+ bool Success = false;
+ switch (Sled.Kind) {
+ case XRayEntryType::ENTRY:
+ Success = patchFunctionEntry(Enable, FuncId, Sled, __xray_FunctionEntry);
+ break;
+ case XRayEntryType::EXIT:
+ Success = patchFunctionExit(Enable, FuncId, Sled);
+ break;
+ case XRayEntryType::TAIL:
+ Success = patchFunctionTailExit(Enable, FuncId, Sled);
+ break;
+ case XRayEntryType::LOG_ARGS_ENTRY:
+ Success = patchFunctionEntry(Enable, FuncId, Sled, __xray_ArgLoggerEntry);
+ break;
+ default:
+ Report("Unsupported sled kind '%d' @%04x\n", Sled.Address, int(Sled.Kind));
+ return false;
+ }
+ return Success;
+}
+
// controlPatching implements the common internals of the patching/unpatching
// implementation. |Enable| defines whether we're enabling or disabling the
// runtime XRay instrumentation.
XRayPatchingStatus controlPatching(bool Enable) XRAY_NEVER_INSTRUMENT {
if (!__sanitizer::atomic_load(&XRayInitialized,
- __sanitizer::memory_order_acquire))
+ __sanitizer::memory_order_acquire))
return XRayPatchingStatus::NOT_INITIALIZED; // Not initialized.
uint8_t NotPatching = false;
@@ -179,38 +215,7 @@ XRayPatchingStatus controlPatching(bool Enable) XRAY_NEVER_INSTRUMENT {
++FuncId;
CurFun = F;
}
-
- // While we're here, we should patch the nop sled. To do that we mprotect
- // the page containing the function to be writeable.
- void *PageAlignedAddr =
- reinterpret_cast<void *>(Sled.Address & ~(PageSize - 1));
- std::size_t MProtectLen = (Sled.Address + cSledLength) -
- reinterpret_cast<uint64_t>(PageAlignedAddr);
- MProtectHelper Protector(PageAlignedAddr, MProtectLen);
- if (Protector.MakeWriteable() == -1) {
- printf("Failed mprotect: %d\n", errno);
- return XRayPatchingStatus::FAILED;
- }
-
- bool Success = false;
- switch (Sled.Kind) {
- case XRayEntryType::ENTRY:
- Success = patchFunctionEntry(Enable, FuncId, Sled, __xray_FunctionEntry);
- break;
- case XRayEntryType::EXIT:
- Success = patchFunctionExit(Enable, FuncId, Sled);
- break;
- case XRayEntryType::TAIL:
- Success = patchFunctionTailExit(Enable, FuncId, Sled);
- break;
- case XRayEntryType::LOG_ARGS_ENTRY:
- Success = patchFunctionEntry(Enable, FuncId, Sled, __xray_ArgLoggerEntry);
- break;
- default:
- Report("Unsupported sled kind: %d\n", int(Sled.Kind));
- continue;
- }
- (void)Success;
+ patchSled(Sled, Enable, FuncId);
}
__sanitizer::atomic_store(&XRayPatching, false,
__sanitizer::memory_order_release);
@@ -226,6 +231,64 @@ XRayPatchingStatus __xray_unpatch() XRAY_NEVER_INSTRUMENT {
return controlPatching(false);
}
+XRayPatchingStatus patchFunction(int32_t FuncId,
+ bool Enable) XRAY_NEVER_INSTRUMENT {
+ if (!__sanitizer::atomic_load(&XRayInitialized,
+ __sanitizer::memory_order_acquire))
+ return XRayPatchingStatus::NOT_INITIALIZED; // Not initialized.
+
+ uint8_t NotPatching = false;
+ if (!__sanitizer::atomic_compare_exchange_strong(
+ &XRayPatching, &NotPatching, true, __sanitizer::memory_order_acq_rel))
+ return XRayPatchingStatus::ONGOING; // Already patching.
+
+ // Next, we look for the function index.
+ XRaySledMap InstrMap;
+ {
+ __sanitizer::SpinMutexLock Guard(&XRayInstrMapMutex);
+ InstrMap = XRayInstrMap;
+ }
+
+ // If we don't have an index, we can't patch individual functions.
+ if (InstrMap.Functions == 0)
+ return XRayPatchingStatus::NOT_INITIALIZED;
+
+ // FuncId must be a positive number, less than the number of functions
+ // instrumented.
+ if (FuncId <= 0 || static_cast<size_t>(FuncId) > InstrMap.Functions) {
+ Report("Invalid function id provided: %d\n", FuncId);
+ return XRayPatchingStatus::FAILED;
+ }
+
+  // Now we patch the sleds for this specific function.
+ auto SledRange = InstrMap.SledsIndex[FuncId - 1];
+ auto *f = SledRange.Begin;
+ auto *e = SledRange.End;
+
+ bool SucceedOnce = false;
+ while (f != e)
+ SucceedOnce |= patchSled(*f++, Enable, FuncId);
+
+ __sanitizer::atomic_store(&XRayPatching, false,
+ __sanitizer::memory_order_release);
+
+ if (!SucceedOnce) {
+ Report("Failed patching any sled for function '%d'.", FuncId);
+ return XRayPatchingStatus::FAILED;
+ }
+
+ return XRayPatchingStatus::SUCCESS;
+}
+
+XRayPatchingStatus __xray_patch_function(int32_t FuncId) XRAY_NEVER_INSTRUMENT {
+ return patchFunction(FuncId, true);
+}
+
+XRayPatchingStatus
+__xray_unpatch_function(int32_t FuncId) XRAY_NEVER_INSTRUMENT {
+ return patchFunction(FuncId, false);
+}
+
int __xray_set_handler_arg1(void (*Handler)(int32_t, XRayEntryType, uint64_t)) {
if (!__sanitizer::atomic_load(&XRayInitialized,
__sanitizer::memory_order_acquire))
@@ -239,3 +302,15 @@ int __xray_set_handler_arg1(void (*Handler)(int32_t, XRayEntryType, uint64_t)) {
return 1;
}
int __xray_remove_handler_arg1() { return __xray_set_handler_arg1(nullptr); }
+
+uintptr_t __xray_function_address(int32_t FuncId) XRAY_NEVER_INSTRUMENT {
+ __sanitizer::SpinMutexLock Guard(&XRayInstrMapMutex);
+ if (FuncId <= 0 || static_cast<size_t>(FuncId) > XRayInstrMap.Functions)
+ return 0;
+ return XRayInstrMap.SledsIndex[FuncId - 1].Begin->Address;
+}
+
+size_t __xray_max_function_id() XRAY_NEVER_INSTRUMENT {
+ __sanitizer::SpinMutexLock Guard(&XRayInstrMapMutex);
+ return XRayInstrMap.Functions;
+}
diff --git a/contrib/compiler-rt/lib/xray/xray_interface_internal.h b/contrib/compiler-rt/lib/xray/xray_interface_internal.h
index 0e3a251f3ad7..ef0c6b15809b 100644
--- a/contrib/compiler-rt/lib/xray/xray_interface_internal.h
+++ b/contrib/compiler-rt/lib/xray/xray_interface_internal.h
@@ -39,6 +39,11 @@ struct XRaySledEntry {
#error "Unsupported word size."
#endif
};
+
+struct XRayFunctionSledIndex {
+ const XRaySledEntry* Begin;
+ const XRaySledEntry* End;
+};
}
namespace __xray {
@@ -46,6 +51,8 @@ namespace __xray {
struct XRaySledMap {
const XRaySledEntry *Sleds;
size_t Entries;
+ const XRayFunctionSledIndex *SledsIndex;
+ size_t Functions;
};
bool patchFunctionEntry(bool Enable, uint32_t FuncId,
diff --git a/contrib/libc++/include/__config b/contrib/libc++/include/__config
index ee46860495ae..2a2907494b2b 100644
--- a/contrib/libc++/include/__config
+++ b/contrib/libc++/include/__config
@@ -314,7 +314,7 @@ typedef __char32_t char32_t;
#define _LIBCPP_NO_EXCEPTIONS
#endif
-#if !(__has_feature(cxx_rtti))
+#if !(__has_feature(cxx_rtti)) && !defined(_LIBCPP_NO_RTTI)
#define _LIBCPP_NO_RTTI
#endif
@@ -1089,6 +1089,13 @@ _LIBCPP_FUNC_VIS extern "C" void __sanitizer_annotate_contiguous_container(
# define _LIBCPP_DIAGNOSE_ERROR(...)
#endif
+#if __has_attribute(fallthough) || _GNUC_VER >= 700
+// Use a function like macro to imply that it must be followed by a semicolon
+#define _LIBCPP_FALLTHROUGH() __attribute__((__fallthrough__))
+#else
+#define _LIBCPP_FALLTHROUGH() ((void)0)
+#endif
+
#if defined(_LIBCPP_ABI_MICROSOFT) && \
(defined(_LIBCPP_COMPILER_MSVC) || __has_declspec_attribute(empty_bases))
# define _LIBCPP_DECLSPEC_EMPTY_BASES __declspec(empty_bases)
@@ -1113,4 +1120,77 @@ _LIBCPP_FUNC_VIS extern "C" void __sanitizer_annotate_contiguous_container(
#endif // __cplusplus
+// Decide whether to use availability macros.
+#if !defined(_LIBCPP_BUILDING_LIBRARY) && \
+ !defined(_LIBCPP_DISABLE_AVAILABILITY) && \
+ __has_feature(attribute_availability_with_strict) && \
+ __has_feature(attribute_availability_in_templates)
+#ifdef __APPLE__
+#define _LIBCPP_USE_AVAILABILITY_APPLE
+#endif
+#endif
+
+// Define availability macros.
+#if defined(_LIBCPP_USE_AVAILABILITY_APPLE)
+#define _LIBCPP_AVAILABILITY_SHARED_MUTEX \
+ __attribute__((availability(macosx,strict,introduced=10.12))) \
+ __attribute__((availability(ios,strict,introduced=10.0))) \
+ __attribute__((availability(tvos,strict,introduced=10.0))) \
+ __attribute__((availability(watchos,strict,introduced=3.0)))
+#define _LIBCPP_AVAILABILITY_BAD_OPTIONAL_ACCESS __attribute__((unavailable))
+#define _LIBCPP_AVAILABILITY_BAD_ARRAY_LENGTH __attribute__((unavailable))
+#define _LIBCPP_AVAILABILITY_UNCAUGHT_EXCEPTIONS \
+ __attribute__((availability(macosx,strict,introduced=10.12))) \
+ __attribute__((availability(ios,strict,introduced=10.0))) \
+ __attribute__((availability(tvos,strict,introduced=10.0))) \
+ __attribute__((availability(watchos,strict,introduced=3.0)))
+#define _LIBCPP_AVAILABILITY_SIZED_NEW_DELETE \
+ __attribute__((availability(macosx,strict,introduced=10.12))) \
+ __attribute__((availability(ios,strict,introduced=10.0))) \
+ __attribute__((availability(tvos,strict,introduced=10.0))) \
+ __attribute__((availability(watchos,strict,introduced=3.0)))
+#define _LIBCPP_AVAILABILITY_FUTURE_ERROR \
+ __attribute__((availability(ios,strict,introduced=6.0)))
+#define _LIBCPP_AVAILABILITY_TYPEINFO_VTABLE \
+ __attribute__((availability(macosx,strict,introduced=10.9))) \
+ __attribute__((availability(ios,strict,introduced=7.0)))
+#define _LIBCPP_AVAILABILITY_LOCALE_CATEGORY \
+ __attribute__((availability(macosx,strict,introduced=10.9))) \
+ __attribute__((availability(ios,strict,introduced=7.0)))
+#define _LIBCPP_AVAILABILITY_ATOMIC_SHARED_PTR \
+ __attribute__((availability(macosx,strict,introduced=10.9))) \
+ __attribute__((availability(ios,strict,introduced=7.0)))
+#else
+#define _LIBCPP_AVAILABILITY_SHARED_MUTEX
+#define _LIBCPP_AVAILABILITY_BAD_OPTIONAL_ACCESS
+#define _LIBCPP_AVAILABILITY_BAD_ARRAY_LENGTH
+#define _LIBCPP_AVAILABILITY_UNCAUGHT_EXCEPTIONS
+#define _LIBCPP_AVAILABILITY_SIZED_NEW_DELETE
+#define _LIBCPP_AVAILABILITY_FUTURE_ERROR
+#define _LIBCPP_AVAILABILITY_TYPEINFO_VTABLE
+#define _LIBCPP_AVAILABILITY_LOCALE_CATEGORY
+#define _LIBCPP_AVAILABILITY_ATOMIC_SHARED_PTR
+#endif
+
+// Define availability that depends on _LIBCPP_NO_EXCEPTIONS.
+#ifdef _LIBCPP_NO_EXCEPTIONS
+#define _LIBCPP_AVAILABILITY_DYNARRAY
+#define _LIBCPP_AVAILABILITY_FUTURE
+#else
+#define _LIBCPP_AVAILABILITY_DYNARRAY _LIBCPP_AVAILABILITY_BAD_ARRAY_LENGTH
+#define _LIBCPP_AVAILABILITY_FUTURE _LIBCPP_AVAILABILITY_FUTURE_ERROR
+#endif
+
+// Availability of stream API in the dylib got dropped and re-added. The
+// extern template should effectively be available at:
+// availability(macosx,introduced=10.9)
+// availability(ios,introduced=7.0)
+#if defined(_LIBCPP_USE_AVAILABILITY_APPLE) && \
+ ((defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && \
+ __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ <= 1090) || \
+ (defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) && \
+ __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ <= 70000))
+#define _LIBCPP_AVAILABILITY_NO_STREAMS_EXTERN_TEMPLATE
+#endif
+
#endif // _LIBCPP_CONFIG
diff --git a/contrib/libc++/include/__locale b/contrib/libc++/include/__locale
index 918cd1df5c6b..4184e7e03489 100644
--- a/contrib/libc++/include/__locale
+++ b/contrib/libc++/include/__locale
@@ -69,6 +69,7 @@ public:
class _LIBCPP_TYPE_VIS id;
typedef int category;
+ _LIBCPP_AVAILABILITY_LOCALE_CATEGORY
static const category // values assigned here are for exposition only
none = 0,
collate = LC_COLLATE_MASK,
diff --git a/contrib/libc++/include/__threading_support b/contrib/libc++/include/__threading_support
index aa947139a4e9..080ebd256b8f 100644
--- a/contrib/libc++/include/__threading_support
+++ b/contrib/libc++/include/__threading_support
@@ -474,7 +474,10 @@ int __libcpp_condvar_timedwait(__libcpp_condvar_t *__cv, __libcpp_mutex_t *__m,
timeout_ms.count() > 0 ? timeout_ms.count()
: 0,
0))
- return GetLastError();
+ {
+ auto __ec = GetLastError();
+ return __ec == ERROR_TIMEOUT ? ETIMEDOUT : __ec;
+ }
return 0;
}
diff --git a/contrib/libc++/include/exception b/contrib/libc++/include/exception
index f12ae42093aa..ca2eaf5c6a04 100644
--- a/contrib/libc++/include/exception
+++ b/contrib/libc++/include/exception
@@ -127,30 +127,33 @@ _LIBCPP_FUNC_VIS terminate_handler get_terminate() _NOEXCEPT;
_LIBCPP_NORETURN _LIBCPP_FUNC_VIS void terminate() _NOEXCEPT;
_LIBCPP_FUNC_VIS bool uncaught_exception() _NOEXCEPT;
-_LIBCPP_FUNC_VIS int uncaught_exceptions() _NOEXCEPT;
+_LIBCPP_FUNC_VIS _LIBCPP_AVAILABILITY_UNCAUGHT_EXCEPTIONS int uncaught_exceptions() _NOEXCEPT;
class _LIBCPP_TYPE_VIS exception_ptr;
_LIBCPP_FUNC_VIS exception_ptr current_exception() _NOEXCEPT;
_LIBCPP_NORETURN _LIBCPP_FUNC_VIS void rethrow_exception(exception_ptr);
+#ifndef _LIBCPP_ABI_MICROSOFT
+
class _LIBCPP_TYPE_VIS exception_ptr
{
void* __ptr_;
public:
_LIBCPP_INLINE_VISIBILITY exception_ptr() _NOEXCEPT : __ptr_() {}
_LIBCPP_INLINE_VISIBILITY exception_ptr(nullptr_t) _NOEXCEPT : __ptr_() {}
+
exception_ptr(const exception_ptr&) _NOEXCEPT;
exception_ptr& operator=(const exception_ptr&) _NOEXCEPT;
~exception_ptr() _NOEXCEPT;
- _LIBCPP_INLINE_VISIBILITY
- _LIBCPP_EXPLICIT
- operator bool() const _NOEXCEPT {return __ptr_ != nullptr;}
+ _LIBCPP_INLINE_VISIBILITY _LIBCPP_EXPLICIT operator bool() const _NOEXCEPT
+ {return __ptr_ != nullptr;}
friend _LIBCPP_INLINE_VISIBILITY
bool operator==(const exception_ptr& __x, const exception_ptr& __y) _NOEXCEPT
{return __x.__ptr_ == __y.__ptr_;}
+
friend _LIBCPP_INLINE_VISIBILITY
bool operator!=(const exception_ptr& __x, const exception_ptr& __y) _NOEXCEPT
{return !(__x == __y);}
@@ -178,6 +181,54 @@ make_exception_ptr(_Ep __e) _NOEXCEPT
#endif
}
+#else // _LIBCPP_ABI_MICROSOFT
+
+class _LIBCPP_TYPE_VIS exception_ptr
+{
+#if defined(__clang__)
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wunused-private-field"
+#endif
+ void* __ptr1_;
+ void* __ptr2_;
+#if defined(__clang__)
+#pragma clang diagnostic pop
+#endif
+public:
+ exception_ptr() _NOEXCEPT;
+ exception_ptr(nullptr_t) _NOEXCEPT;
+ exception_ptr(const exception_ptr& __other) _NOEXCEPT;
+ exception_ptr& operator=(const exception_ptr& __other) _NOEXCEPT;
+ exception_ptr& operator=(nullptr_t) _NOEXCEPT;
+ ~exception_ptr() _NOEXCEPT;
+ _LIBCPP_EXPLICIT operator bool() const _NOEXCEPT;
+};
+
+_LIBCPP_FUNC_VIS
+bool operator==(const exception_ptr& __x, const exception_ptr& __y) _NOEXCEPT;
+
+inline _LIBCPP_INLINE_VISIBILITY
+bool operator!=(const exception_ptr& __x, const exception_ptr& __y) _NOEXCEPT
+ {return !(__x == __y);}
+
+_LIBCPP_FUNC_VIS void swap(exception_ptr&, exception_ptr&) _NOEXCEPT;
+
+_LIBCPP_FUNC_VIS exception_ptr __copy_exception_ptr(void *__except, const void* __ptr);
+_LIBCPP_FUNC_VIS exception_ptr current_exception() _NOEXCEPT;
+_LIBCPP_NORETURN _LIBCPP_FUNC_VIS void rethrow_exception(exception_ptr p);
+
+// This is a built-in template function that automatically extracts the
+// required information.
+template <class _E> void *__GetExceptionInfo(_E);
+
+template<class _Ep>
+exception_ptr
+make_exception_ptr(_Ep __e) _NOEXCEPT
+{
+ return __copy_exception_ptr(_VSTD::addressof(__e), __GetExceptionInfo(__e));
+}
+
+#endif // _LIBCPP_ABI_MICROSOFT
// nested_exception
class _LIBCPP_EXCEPTION_ABI nested_exception
diff --git a/contrib/libc++/include/experimental/dynarray b/contrib/libc++/include/experimental/dynarray
index 8c9733770c3b..f96a0e5fed38 100644
--- a/contrib/libc++/include/experimental/dynarray
+++ b/contrib/libc++/include/experimental/dynarray
@@ -110,7 +110,7 @@ public:
namespace std { namespace experimental { inline namespace __array_extensions_v1 {
template <class _Tp>
-struct _LIBCPP_TEMPLATE_VIS dynarray
+struct _LIBCPP_TEMPLATE_VIS _LIBCPP_AVAILABILITY_DYNARRAY dynarray
{
public:
// types:
diff --git a/contrib/libc++/include/experimental/optional b/contrib/libc++/include/experimental/optional
index f32941b1a8e7..48adfbae5167 100644
--- a/contrib/libc++/include/experimental/optional
+++ b/contrib/libc++/include/experimental/optional
@@ -145,7 +145,7 @@ namespace std { namespace experimental { inline namespace fundamentals_v1 {
#include <stdexcept>
_LIBCPP_BEGIN_NAMESPACE_EXPERIMENTAL
-class _LIBCPP_EXCEPTION_ABI bad_optional_access
+class _LIBCPP_EXCEPTION_ABI _LIBCPP_AVAILABILITY_BAD_OPTIONAL_ACCESS bad_optional_access
: public std::logic_error
{
public:
@@ -523,6 +523,9 @@ public:
constexpr explicit operator bool() const noexcept {return this->__engaged_;}
_LIBCPP_NORETURN _LIBCPP_INLINE_VISIBILITY
+#ifndef _LIBCPP_NO_EXCEPTIONS
+_LIBCPP_AVAILABILITY_BAD_OPTIONAL_ACCESS
+#endif
constexpr void __throw_bad_optional_access() const
{
#ifndef _LIBCPP_NO_EXCEPTIONS
@@ -532,7 +535,7 @@ public:
#endif
}
- _LIBCPP_INLINE_VISIBILITY
+ _LIBCPP_INLINE_VISIBILITY _LIBCPP_AVAILABILITY_BAD_OPTIONAL_ACCESS
constexpr value_type const& value() const
{
if (!this->__engaged_)
@@ -540,7 +543,7 @@ public:
return this->__val_;
}
- _LIBCPP_INLINE_VISIBILITY
+ _LIBCPP_INLINE_VISIBILITY _LIBCPP_AVAILABILITY_BAD_OPTIONAL_ACCESS
value_type& value()
{
if (!this->__engaged_)
diff --git a/contrib/libc++/include/functional b/contrib/libc++/include/functional
index 386fe678bff3..ea35697d3bbf 100644
--- a/contrib/libc++/include/functional
+++ b/contrib/libc++/include/functional
@@ -2224,7 +2224,7 @@ typename __bind_return<_Fp, _BoundArgs, _Args>::type
__apply_functor(_Fp& __f, _BoundArgs& __bound_args, __tuple_indices<_Indx...>,
_Args&& __args)
{
- return __invoke(__f, __mu(_VSTD::get<_Indx>(__bound_args), __args)...);
+ return _VSTD::__invoke(__f, _VSTD::__mu(_VSTD::get<_Indx>(__bound_args), __args)...);
}
template<class _Fp, class ..._BoundArgs>
@@ -2257,7 +2257,7 @@ public:
typename __bind_return<_Fd, _Td, tuple<_Args&&...> >::type
operator()(_Args&& ...__args)
{
- return __apply_functor(__f_, __bound_args_, __indices(),
+ return _VSTD::__apply_functor(__f_, __bound_args_, __indices(),
tuple<_Args&&...>(_VSTD::forward<_Args>(__args)...));
}
@@ -2266,7 +2266,7 @@ public:
typename __bind_return<const _Fd, const _Td, tuple<_Args&&...> >::type
operator()(_Args&& ...__args) const
{
- return __apply_functor(__f_, __bound_args_, __indices(),
+ return _VSTD::__apply_functor(__f_, __bound_args_, __indices(),
tuple<_Args&&...>(_VSTD::forward<_Args>(__args)...));
}
};
diff --git a/contrib/libc++/include/future b/contrib/libc++/include/future
index 1ceedf91e9cd..e38876758e13 100644
--- a/contrib/libc++/include/future
+++ b/contrib/libc++/include/future
@@ -499,7 +499,7 @@ make_error_condition(future_errc __e) _NOEXCEPT
return error_condition(static_cast<int>(__e), future_category());
}
-class _LIBCPP_EXCEPTION_ABI future_error
+class _LIBCPP_EXCEPTION_ABI _LIBCPP_AVAILABILITY_FUTURE_ERROR future_error
: public logic_error
{
error_code __ec_;
@@ -515,6 +515,9 @@ public:
};
_LIBCPP_NORETURN inline _LIBCPP_ALWAYS_INLINE
+#ifndef _LIBCPP_NO_EXCEPTIONS
+_LIBCPP_AVAILABILITY_FUTURE_ERROR
+#endif
void __throw_future_error(future_errc _Ev)
{
#ifndef _LIBCPP_NO_EXCEPTIONS
@@ -525,7 +528,7 @@ void __throw_future_error(future_errc _Ev)
#endif
}
-class _LIBCPP_TYPE_VIS __assoc_sub_state
+class _LIBCPP_TYPE_VIS _LIBCPP_AVAILABILITY_FUTURE __assoc_sub_state
: public __shared_count
{
protected:
@@ -612,7 +615,7 @@ __assoc_sub_state::wait_for(const chrono::duration<_Rep, _Period>& __rel_time) c
}
template <class _Rp>
-class __assoc_state
+class _LIBCPP_AVAILABILITY_FUTURE __assoc_state
: public __assoc_sub_state
{
typedef __assoc_sub_state base;
@@ -652,6 +655,7 @@ __assoc_state<_Rp>::__on_zero_shared() _NOEXCEPT
template <class _Rp>
template <class _Arg>
+_LIBCPP_AVAILABILITY_FUTURE
void
#ifndef _LIBCPP_HAS_NO_RVALUE_REFERENCES
__assoc_state<_Rp>::set_value(_Arg&& __arg)
@@ -707,7 +711,7 @@ __assoc_state<_Rp>::copy()
}
template <class _Rp>
-class __assoc_state<_Rp&>
+class _LIBCPP_AVAILABILITY_FUTURE __assoc_state<_Rp&>
: public __assoc_sub_state
{
typedef __assoc_sub_state base;
@@ -767,7 +771,7 @@ __assoc_state<_Rp&>::copy()
}
template <class _Rp, class _Alloc>
-class __assoc_state_alloc
+class _LIBCPP_AVAILABILITY_FUTURE __assoc_state_alloc
: public __assoc_state<_Rp>
{
typedef __assoc_state<_Rp> base;
@@ -795,7 +799,7 @@ __assoc_state_alloc<_Rp, _Alloc>::__on_zero_shared() _NOEXCEPT
}
template <class _Rp, class _Alloc>
-class __assoc_state_alloc<_Rp&, _Alloc>
+class _LIBCPP_AVAILABILITY_FUTURE __assoc_state_alloc<_Rp&, _Alloc>
: public __assoc_state<_Rp&>
{
typedef __assoc_state<_Rp&> base;
@@ -821,7 +825,7 @@ __assoc_state_alloc<_Rp&, _Alloc>::__on_zero_shared() _NOEXCEPT
}
template <class _Alloc>
-class __assoc_sub_state_alloc
+class _LIBCPP_AVAILABILITY_FUTURE __assoc_sub_state_alloc
: public __assoc_sub_state
{
typedef __assoc_sub_state base;
@@ -847,7 +851,7 @@ __assoc_sub_state_alloc<_Alloc>::__on_zero_shared() _NOEXCEPT
}
template <class _Rp, class _Fp>
-class __deferred_assoc_state
+class _LIBCPP_AVAILABILITY_FUTURE __deferred_assoc_state
: public __assoc_state<_Rp>
{
typedef __assoc_state<_Rp> base;
@@ -894,7 +898,7 @@ __deferred_assoc_state<_Rp, _Fp>::__execute()
}
template <class _Fp>
-class __deferred_assoc_state<void, _Fp>
+class _LIBCPP_AVAILABILITY_FUTURE __deferred_assoc_state<void, _Fp>
: public __assoc_sub_state
{
typedef __assoc_sub_state base;
@@ -942,7 +946,7 @@ __deferred_assoc_state<void, _Fp>::__execute()
}
template <class _Rp, class _Fp>
-class __async_assoc_state
+class _LIBCPP_AVAILABILITY_FUTURE __async_assoc_state
: public __assoc_state<_Rp>
{
typedef __assoc_state<_Rp> base;
@@ -997,7 +1001,7 @@ __async_assoc_state<_Rp, _Fp>::__on_zero_shared() _NOEXCEPT
}
template <class _Fp>
-class __async_assoc_state<void, _Fp>
+class _LIBCPP_AVAILABILITY_FUTURE __async_assoc_state<void, _Fp>
: public __assoc_sub_state
{
typedef __assoc_sub_state base;
@@ -1076,7 +1080,7 @@ __make_async_assoc_state(_Fp __f);
#endif
template <class _Rp>
-class _LIBCPP_TEMPLATE_VIS future
+class _LIBCPP_TEMPLATE_VIS _LIBCPP_AVAILABILITY_FUTURE future
{
__assoc_state<_Rp>* __state_;
@@ -1179,7 +1183,7 @@ future<_Rp>::get()
}
template <class _Rp>
-class _LIBCPP_TEMPLATE_VIS future<_Rp&>
+class _LIBCPP_TEMPLATE_VIS _LIBCPP_AVAILABILITY_FUTURE future<_Rp&>
{
__assoc_state<_Rp&>* __state_;
@@ -1277,7 +1281,7 @@ future<_Rp&>::get()
}
template <>
-class _LIBCPP_TYPE_VIS future<void>
+class _LIBCPP_TYPE_VIS _LIBCPP_AVAILABILITY_FUTURE future<void>
{
__assoc_sub_state* __state_;
@@ -1360,7 +1364,7 @@ swap(future<_Rp>& __x, future<_Rp>& __y) _NOEXCEPT
template <class _Callable> class packaged_task;
template <class _Rp>
-class _LIBCPP_TEMPLATE_VIS promise
+class _LIBCPP_TEMPLATE_VIS _LIBCPP_AVAILABILITY_FUTURE promise
{
__assoc_state<_Rp>* __state_;
@@ -1527,7 +1531,7 @@ promise<_Rp>::set_exception_at_thread_exit(exception_ptr __p)
// promise<R&>
template <class _Rp>
-class _LIBCPP_TEMPLATE_VIS promise<_Rp&>
+class _LIBCPP_TEMPLATE_VIS _LIBCPP_AVAILABILITY_FUTURE promise<_Rp&>
{
__assoc_state<_Rp&>* __state_;
@@ -1663,7 +1667,7 @@ promise<_Rp&>::set_exception_at_thread_exit(exception_ptr __p)
// promise<void>
template <>
-class _LIBCPP_TYPE_VIS promise<void>
+class _LIBCPP_TYPE_VIS _LIBCPP_AVAILABILITY_FUTURE promise<void>
{
__assoc_sub_state* __state_;
@@ -1749,7 +1753,7 @@ template <class _Rp, class _Alloc>
template<class _Fp> class __packaged_task_base;
template<class _Rp, class ..._ArgTypes>
-class __packaged_task_base<_Rp(_ArgTypes...)>
+class _LIBCPP_AVAILABILITY_FUTURE __packaged_task_base<_Rp(_ArgTypes...)>
{
__packaged_task_base(const __packaged_task_base&);
__packaged_task_base& operator=(const __packaged_task_base&);
@@ -1767,7 +1771,7 @@ public:
template<class _FD, class _Alloc, class _FB> class __packaged_task_func;
template<class _Fp, class _Alloc, class _Rp, class ..._ArgTypes>
-class __packaged_task_func<_Fp, _Alloc, _Rp(_ArgTypes...)>
+class _LIBCPP_AVAILABILITY_FUTURE __packaged_task_func<_Fp, _Alloc, _Rp(_ArgTypes...)>
: public __packaged_task_base<_Rp(_ArgTypes...)>
{
__compressed_pair<_Fp, _Alloc> __f_;
@@ -1825,7 +1829,7 @@ __packaged_task_func<_Fp, _Alloc, _Rp(_ArgTypes...)>::operator()(_ArgTypes&& ...
template <class _Callable> class __packaged_task_function;
template<class _Rp, class ..._ArgTypes>
-class __packaged_task_function<_Rp(_ArgTypes...)>
+class _LIBCPP_AVAILABILITY_FUTURE __packaged_task_function<_Rp(_ArgTypes...)>
{
typedef __packaged_task_base<_Rp(_ArgTypes...)> __base;
typename aligned_storage<3*sizeof(void*)>::type __buf_;
@@ -2000,7 +2004,7 @@ __packaged_task_function<_Rp(_ArgTypes...)>::operator()(_ArgTypes... __arg) cons
}
template<class _Rp, class ..._ArgTypes>
-class _LIBCPP_TEMPLATE_VIS packaged_task<_Rp(_ArgTypes...)>
+class _LIBCPP_TEMPLATE_VIS _LIBCPP_AVAILABILITY_FUTURE packaged_task<_Rp(_ArgTypes...)>
{
public:
typedef _Rp result_type; // extension
@@ -2129,7 +2133,7 @@ packaged_task<_Rp(_ArgTypes...)>::reset()
}
template<class ..._ArgTypes>
-class _LIBCPP_TEMPLATE_VIS packaged_task<void(_ArgTypes...)>
+class _LIBCPP_TEMPLATE_VIS _LIBCPP_AVAILABILITY_FUTURE packaged_task<void(_ArgTypes...)>
{
public:
typedef void result_type; // extension
@@ -2517,7 +2521,7 @@ shared_future<_Rp&>::operator=(const shared_future& __rhs)
}
template <>
-class _LIBCPP_TYPE_VIS shared_future<void>
+class _LIBCPP_TYPE_VIS _LIBCPP_AVAILABILITY_FUTURE shared_future<void>
{
__assoc_sub_state* __state_;
diff --git a/contrib/libc++/include/istream b/contrib/libc++/include/istream
index 9a8bb44ef3ec..530f204cfad9 100644
--- a/contrib/libc++/include/istream
+++ b/contrib/libc++/include/istream
@@ -1675,9 +1675,11 @@ operator>>(basic_istream<_CharT, _Traits>& __is, bitset<_Size>& __x)
return __is;
}
+#ifndef _LIBCPP_AVAILABILITY_NO_STREAMS_EXTERN_TEMPLATE
_LIBCPP_EXTERN_TEMPLATE(class _LIBCPP_EXTERN_TEMPLATE_TYPE_VIS basic_istream<char>)
_LIBCPP_EXTERN_TEMPLATE(class _LIBCPP_EXTERN_TEMPLATE_TYPE_VIS basic_istream<wchar_t>)
_LIBCPP_EXTERN_TEMPLATE(class _LIBCPP_EXTERN_TEMPLATE_TYPE_VIS basic_iostream<char>)
+#endif
_LIBCPP_END_NAMESPACE_STD
diff --git a/contrib/libc++/include/locale b/contrib/libc++/include/locale
index 6bce16e64884..ad1c1f0083ec 100644
--- a/contrib/libc++/include/locale
+++ b/contrib/libc++/include/locale
@@ -1402,6 +1402,7 @@ num_put<_CharT, _OutputIterator>::do_put(iter_type __s, ios_base& __iob,
this->__format_int(__fmt+1, __len, true, __iob.flags());
const unsigned __nbuf = (numeric_limits<long>::digits / 3)
+ ((numeric_limits<long>::digits % 3) != 0)
+ + ((__iob.flags() & ios_base::showbase) != 0)
+ 2;
char __nar[__nbuf];
int __nc = __libcpp_snprintf_l(__nar, sizeof(__nar), _LIBCPP_GET_C_LOCALE, __fmt, __v);
@@ -1428,6 +1429,7 @@ num_put<_CharT, _OutputIterator>::do_put(iter_type __s, ios_base& __iob,
this->__format_int(__fmt+1, __len, true, __iob.flags());
const unsigned __nbuf = (numeric_limits<long long>::digits / 3)
+ ((numeric_limits<long long>::digits % 3) != 0)
+ + ((__iob.flags() & ios_base::showbase) != 0)
+ 2;
char __nar[__nbuf];
int __nc = __libcpp_snprintf_l(__nar, sizeof(__nar), _LIBCPP_GET_C_LOCALE, __fmt, __v);
@@ -1454,6 +1456,7 @@ num_put<_CharT, _OutputIterator>::do_put(iter_type __s, ios_base& __iob,
this->__format_int(__fmt+1, __len, false, __iob.flags());
const unsigned __nbuf = (numeric_limits<unsigned long>::digits / 3)
+ ((numeric_limits<unsigned long>::digits % 3) != 0)
+ + ((__iob.flags() & ios_base::showbase) != 0)
+ 1;
char __nar[__nbuf];
int __nc = __libcpp_snprintf_l(__nar, sizeof(__nar), _LIBCPP_GET_C_LOCALE, __fmt, __v);
@@ -1480,6 +1483,7 @@ num_put<_CharT, _OutputIterator>::do_put(iter_type __s, ios_base& __iob,
this->__format_int(__fmt+1, __len, false, __iob.flags());
const unsigned __nbuf = (numeric_limits<unsigned long long>::digits / 3)
+ ((numeric_limits<unsigned long long>::digits % 3) != 0)
+ + ((__iob.flags() & ios_base::showbase) != 0)
+ 1;
char __nar[__nbuf];
int __nc = __libcpp_snprintf_l(__nar, sizeof(__nar), _LIBCPP_GET_C_LOCALE, __fmt, __v);
@@ -1685,6 +1689,22 @@ protected:
~__time_get_c_storage() {}
};
+template <> _LIBCPP_FUNC_VIS const string* __time_get_c_storage<char>::__weeks() const;
+template <> _LIBCPP_FUNC_VIS const string* __time_get_c_storage<char>::__months() const;
+template <> _LIBCPP_FUNC_VIS const string* __time_get_c_storage<char>::__am_pm() const;
+template <> _LIBCPP_FUNC_VIS const string& __time_get_c_storage<char>::__c() const;
+template <> _LIBCPP_FUNC_VIS const string& __time_get_c_storage<char>::__r() const;
+template <> _LIBCPP_FUNC_VIS const string& __time_get_c_storage<char>::__x() const;
+template <> _LIBCPP_FUNC_VIS const string& __time_get_c_storage<char>::__X() const;
+
+template <> _LIBCPP_FUNC_VIS const wstring* __time_get_c_storage<wchar_t>::__weeks() const;
+template <> _LIBCPP_FUNC_VIS const wstring* __time_get_c_storage<wchar_t>::__months() const;
+template <> _LIBCPP_FUNC_VIS const wstring* __time_get_c_storage<wchar_t>::__am_pm() const;
+template <> _LIBCPP_FUNC_VIS const wstring& __time_get_c_storage<wchar_t>::__c() const;
+template <> _LIBCPP_FUNC_VIS const wstring& __time_get_c_storage<wchar_t>::__r() const;
+template <> _LIBCPP_FUNC_VIS const wstring& __time_get_c_storage<wchar_t>::__x() const;
+template <> _LIBCPP_FUNC_VIS const wstring& __time_get_c_storage<wchar_t>::__X() const;
+
template <class _CharT, class _InputIterator = istreambuf_iterator<_CharT> >
class _LIBCPP_TEMPLATE_VIS time_get
: public locale::facet,
@@ -2825,7 +2845,7 @@ money_get<_CharT, _InputIterator>::__do_get(iter_type& __b, iter_type __e,
return false;
}
}
- // drop through
+ _LIBCPP_FALLTHROUGH();
case money_base::none:
if (__p != 3)
{
diff --git a/contrib/libc++/include/memory b/contrib/libc++/include/memory
index 3fc0e5a1bf12..41ab01b46f7e 100644
--- a/contrib/libc++/include/memory
+++ b/contrib/libc++/include/memory
@@ -3559,7 +3559,7 @@ template <class _Tp, class _Dp, class _Alloc>
const void*
__shared_ptr_pointer<_Tp, _Dp, _Alloc>::__get_deleter(const type_info& __t) const _NOEXCEPT
{
- return __t == typeid(_Dp) ? _VSTD::addressof(__data_.first().second()) : 0;
+ return __t == typeid(_Dp) ? _VSTD::addressof(__data_.first().second()) : nullptr;
}
#endif // _LIBCPP_NO_RTTI
@@ -5293,7 +5293,8 @@ private:
friend _LIBCPP_FUNC_VIS __sp_mut& __get_sp_mut(const void*);
};
-_LIBCPP_FUNC_VIS __sp_mut& __get_sp_mut(const void*);
+_LIBCPP_FUNC_VIS _LIBCPP_AVAILABILITY_ATOMIC_SHARED_PTR
+__sp_mut& __get_sp_mut(const void*);
template <class _Tp>
inline _LIBCPP_INLINE_VISIBILITY
@@ -5304,6 +5305,7 @@ atomic_is_lock_free(const shared_ptr<_Tp>*)
}
template <class _Tp>
+_LIBCPP_AVAILABILITY_ATOMIC_SHARED_PTR
shared_ptr<_Tp>
atomic_load(const shared_ptr<_Tp>* __p)
{
@@ -5316,6 +5318,7 @@ atomic_load(const shared_ptr<_Tp>* __p)
template <class _Tp>
inline _LIBCPP_INLINE_VISIBILITY
+_LIBCPP_AVAILABILITY_ATOMIC_SHARED_PTR
shared_ptr<_Tp>
atomic_load_explicit(const shared_ptr<_Tp>* __p, memory_order)
{
@@ -5323,6 +5326,7 @@ atomic_load_explicit(const shared_ptr<_Tp>* __p, memory_order)
}
template <class _Tp>
+_LIBCPP_AVAILABILITY_ATOMIC_SHARED_PTR
void
atomic_store(shared_ptr<_Tp>* __p, shared_ptr<_Tp> __r)
{
@@ -5334,6 +5338,7 @@ atomic_store(shared_ptr<_Tp>* __p, shared_ptr<_Tp> __r)
template <class _Tp>
inline _LIBCPP_INLINE_VISIBILITY
+_LIBCPP_AVAILABILITY_ATOMIC_SHARED_PTR
void
atomic_store_explicit(shared_ptr<_Tp>* __p, shared_ptr<_Tp> __r, memory_order)
{
@@ -5341,6 +5346,7 @@ atomic_store_explicit(shared_ptr<_Tp>* __p, shared_ptr<_Tp> __r, memory_order)
}
template <class _Tp>
+_LIBCPP_AVAILABILITY_ATOMIC_SHARED_PTR
shared_ptr<_Tp>
atomic_exchange(shared_ptr<_Tp>* __p, shared_ptr<_Tp> __r)
{
@@ -5353,6 +5359,7 @@ atomic_exchange(shared_ptr<_Tp>* __p, shared_ptr<_Tp> __r)
template <class _Tp>
inline _LIBCPP_INLINE_VISIBILITY
+_LIBCPP_AVAILABILITY_ATOMIC_SHARED_PTR
shared_ptr<_Tp>
atomic_exchange_explicit(shared_ptr<_Tp>* __p, shared_ptr<_Tp> __r, memory_order)
{
@@ -5360,6 +5367,7 @@ atomic_exchange_explicit(shared_ptr<_Tp>* __p, shared_ptr<_Tp> __r, memory_order
}
template <class _Tp>
+_LIBCPP_AVAILABILITY_ATOMIC_SHARED_PTR
bool
atomic_compare_exchange_strong(shared_ptr<_Tp>* __p, shared_ptr<_Tp>* __v, shared_ptr<_Tp> __w)
{
@@ -5381,6 +5389,7 @@ atomic_compare_exchange_strong(shared_ptr<_Tp>* __p, shared_ptr<_Tp>* __v, share
template <class _Tp>
inline _LIBCPP_INLINE_VISIBILITY
+_LIBCPP_AVAILABILITY_ATOMIC_SHARED_PTR
bool
atomic_compare_exchange_weak(shared_ptr<_Tp>* __p, shared_ptr<_Tp>* __v, shared_ptr<_Tp> __w)
{
@@ -5389,6 +5398,7 @@ atomic_compare_exchange_weak(shared_ptr<_Tp>* __p, shared_ptr<_Tp>* __v, shared_
template <class _Tp>
inline _LIBCPP_INLINE_VISIBILITY
+_LIBCPP_AVAILABILITY_ATOMIC_SHARED_PTR
bool
atomic_compare_exchange_strong_explicit(shared_ptr<_Tp>* __p, shared_ptr<_Tp>* __v,
shared_ptr<_Tp> __w, memory_order, memory_order)
@@ -5398,6 +5408,7 @@ atomic_compare_exchange_strong_explicit(shared_ptr<_Tp>* __p, shared_ptr<_Tp>* _
template <class _Tp>
inline _LIBCPP_INLINE_VISIBILITY
+_LIBCPP_AVAILABILITY_ATOMIC_SHARED_PTR
bool
atomic_compare_exchange_weak_explicit(shared_ptr<_Tp>* __p, shared_ptr<_Tp>* __v,
shared_ptr<_Tp> __w, memory_order, memory_order)
diff --git a/contrib/libc++/include/new b/contrib/libc++/include/new
index c0e7b2dafe5b..34df2efee09e 100644
--- a/contrib/libc++/include/new
+++ b/contrib/libc++/include/new
@@ -146,9 +146,8 @@ _LIBCPP_NORETURN _LIBCPP_FUNC_VIS void __throw_bad_alloc(); // not in C++ spec
#if defined(_LIBCPP_BUILDING_LIBRARY) || (_LIBCPP_STD_VER > 11)
-class _LIBCPP_EXCEPTION_ABI bad_array_length
- : public bad_alloc
-{
+class _LIBCPP_EXCEPTION_ABI _LIBCPP_AVAILABILITY_BAD_ARRAY_LENGTH
+ bad_array_length : public bad_alloc {
public:
bad_array_length() _NOEXCEPT;
virtual ~bad_array_length() _NOEXCEPT;
@@ -182,7 +181,7 @@ _LIBCPP_OVERRIDABLE_FUNC_VIS void* operator new(std::size_t __sz, const std::not
_LIBCPP_OVERRIDABLE_FUNC_VIS void operator delete(void* __p) _NOEXCEPT;
_LIBCPP_OVERRIDABLE_FUNC_VIS void operator delete(void* __p, const std::nothrow_t&) _NOEXCEPT;
#ifndef _LIBCPP_HAS_NO_SIZED_DEALLOCATION
-_LIBCPP_OVERRIDABLE_FUNC_VIS void operator delete(void* __p, std::size_t __sz) _NOEXCEPT;
+_LIBCPP_OVERRIDABLE_FUNC_VIS _LIBCPP_AVAILABILITY_SIZED_NEW_DELETE void operator delete(void* __p, std::size_t __sz) _NOEXCEPT;
#endif
_LIBCPP_OVERRIDABLE_FUNC_VIS void* operator new[](std::size_t __sz) _THROW_BAD_ALLOC;
@@ -190,7 +189,7 @@ _LIBCPP_OVERRIDABLE_FUNC_VIS void* operator new[](std::size_t __sz, const std::n
_LIBCPP_OVERRIDABLE_FUNC_VIS void operator delete[](void* __p) _NOEXCEPT;
_LIBCPP_OVERRIDABLE_FUNC_VIS void operator delete[](void* __p, const std::nothrow_t&) _NOEXCEPT;
#ifndef _LIBCPP_HAS_NO_SIZED_DEALLOCATION
-_LIBCPP_OVERRIDABLE_FUNC_VIS void operator delete[](void* __p, std::size_t __sz) _NOEXCEPT;
+_LIBCPP_OVERRIDABLE_FUNC_VIS _LIBCPP_AVAILABILITY_SIZED_NEW_DELETE void operator delete[](void* __p, std::size_t __sz) _NOEXCEPT;
#endif
#ifndef _LIBCPP_HAS_NO_ALIGNED_ALLOCATION
@@ -199,7 +198,7 @@ _LIBCPP_OVERRIDABLE_FUNC_VIS void* operator new(std::size_t __sz, std::align_val
_LIBCPP_OVERRIDABLE_FUNC_VIS void operator delete(void* __p, std::align_val_t) _NOEXCEPT;
_LIBCPP_OVERRIDABLE_FUNC_VIS void operator delete(void* __p, std::align_val_t, const std::nothrow_t&) _NOEXCEPT;
#ifndef _LIBCPP_HAS_NO_SIZED_DEALLOCATION
-_LIBCPP_OVERRIDABLE_FUNC_VIS void operator delete(void* __p, std::size_t __sz, std::align_val_t) _NOEXCEPT;
+_LIBCPP_OVERRIDABLE_FUNC_VIS _LIBCPP_AVAILABILITY_SIZED_NEW_DELETE void operator delete(void* __p, std::size_t __sz, std::align_val_t) _NOEXCEPT;
#endif
_LIBCPP_OVERRIDABLE_FUNC_VIS void* operator new[](std::size_t __sz, std::align_val_t) _THROW_BAD_ALLOC;
@@ -207,7 +206,7 @@ _LIBCPP_OVERRIDABLE_FUNC_VIS void* operator new[](std::size_t __sz, std::align_v
_LIBCPP_OVERRIDABLE_FUNC_VIS void operator delete[](void* __p, std::align_val_t) _NOEXCEPT;
_LIBCPP_OVERRIDABLE_FUNC_VIS void operator delete[](void* __p, std::align_val_t, const std::nothrow_t&) _NOEXCEPT;
#ifndef _LIBCPP_HAS_NO_SIZED_DEALLOCATION
-_LIBCPP_OVERRIDABLE_FUNC_VIS void operator delete[](void* __p, std::size_t __sz, std::align_val_t) _NOEXCEPT;
+_LIBCPP_OVERRIDABLE_FUNC_VIS _LIBCPP_AVAILABILITY_SIZED_NEW_DELETE void operator delete[](void* __p, std::size_t __sz, std::align_val_t) _NOEXCEPT;
#endif
#endif
@@ -238,6 +237,9 @@ inline _LIBCPP_INLINE_VISIBILITY void __libcpp_deallocate(void *__ptr) {
#ifdef _LIBCPP_BAD_ARRAY_LENGTH_DEFINED
_LIBCPP_NORETURN inline _LIBCPP_ALWAYS_INLINE
+#ifndef _LIBCPP_NO_EXCEPTIONS
+_LIBCPP_AVAILABILITY_BAD_ARRAY_LENGTH
+#endif
void __throw_bad_array_length()
{
#ifndef _LIBCPP_NO_EXCEPTIONS
diff --git a/contrib/libc++/include/ostream b/contrib/libc++/include/ostream
index ca2c83f74a3f..9bf8d3cdcfb5 100644
--- a/contrib/libc++/include/ostream
+++ b/contrib/libc++/include/ostream
@@ -1080,8 +1080,10 @@ operator<<(basic_ostream<_CharT, _Traits>& __os, const bitset<_Size>& __x)
use_facet<ctype<_CharT> >(__os.getloc()).widen('1'));
}
+#ifndef _LIBCPP_AVAILABILITY_NO_STREAMS_EXTERN_TEMPLATE
_LIBCPP_EXTERN_TEMPLATE(class _LIBCPP_EXTERN_TEMPLATE_TYPE_VIS basic_ostream<char>)
_LIBCPP_EXTERN_TEMPLATE(class _LIBCPP_EXTERN_TEMPLATE_TYPE_VIS basic_ostream<wchar_t>)
+#endif
_LIBCPP_END_NAMESPACE_STD
diff --git a/contrib/libc++/include/random b/contrib/libc++/include/random
index 83fff90a4178..6c36e8c01ae2 100644
--- a/contrib/libc++/include/random
+++ b/contrib/libc++/include/random
@@ -3997,16 +3997,30 @@ public:
{return !(__x == __y);}
};
+#ifndef _LIBCPP_MSVCRT
+extern "C" double lgamma_r(double, int *);
+#endif
+
+inline _LIBCPP_INLINE_VISIBILITY double __libcpp_lgamma(double __d) {
+#if defined(_LIBCPP_MSVCRT)
+ return lgamma(__d);
+#else
+ int __sign;
+ return lgamma_r(__d, &__sign);
+#endif
+}
+
template<class _IntType>
-binomial_distribution<_IntType>::param_type::param_type(result_type __t, double __p)
+binomial_distribution<_IntType>::param_type::param_type(const result_type __t, const double __p)
: __t_(__t), __p_(__p)
{
if (0 < __p_ && __p_ < 1)
{
__r0_ = static_cast<result_type>((__t_ + 1) * __p_);
- __pr_ = _VSTD::exp(_VSTD::lgamma(__t_ + 1.) - _VSTD::lgamma(__r0_ + 1.) -
- _VSTD::lgamma(__t_ - __r0_ + 1.) + __r0_ * _VSTD::log(__p_) +
- (__t_ - __r0_) * _VSTD::log(1 - __p_));
+ __pr_ = _VSTD::exp(__libcpp_lgamma(__t_ + 1.) -
+ __libcpp_lgamma(__r0_ + 1.) -
+ __libcpp_lgamma(__t_ - __r0_ + 1.) + __r0_ * _VSTD::log(__p_) +
+ (__t_ - __r0_) * _VSTD::log(1 - __p_));
__odds_ratio_ = __p_ / (1 - __p_);
}
}
diff --git a/contrib/libc++/include/shared_mutex b/contrib/libc++/include/shared_mutex
index f2fd667b5c67..ff36ee6ac67d 100644
--- a/contrib/libc++/include/shared_mutex
+++ b/contrib/libc++/include/shared_mutex
@@ -141,7 +141,7 @@ template <class Mutex>
_LIBCPP_BEGIN_NAMESPACE_STD
-struct _LIBCPP_TYPE_VIS __shared_mutex_base
+struct _LIBCPP_TYPE_VIS _LIBCPP_AVAILABILITY_SHARED_MUTEX __shared_mutex_base
{
mutex __mut_;
condition_variable __gate1_;
@@ -173,11 +173,11 @@ struct _LIBCPP_TYPE_VIS __shared_mutex_base
#if _LIBCPP_STD_VER > 14
-class _LIBCPP_TYPE_VIS shared_mutex
+class _LIBCPP_TYPE_VIS _LIBCPP_AVAILABILITY_SHARED_MUTEX shared_mutex
{
__shared_mutex_base __base;
public:
- shared_mutex() : __base() {}
+ _LIBCPP_INLINE_VISIBILITY shared_mutex() : __base() {}
_LIBCPP_INLINE_VISIBILITY ~shared_mutex() = default;
shared_mutex(const shared_mutex&) = delete;
@@ -199,7 +199,7 @@ public:
#endif
-class _LIBCPP_TYPE_VIS shared_timed_mutex
+class _LIBCPP_TYPE_VIS _LIBCPP_AVAILABILITY_SHARED_MUTEX shared_timed_mutex
{
__shared_mutex_base __base;
public:
diff --git a/contrib/libc++/include/streambuf b/contrib/libc++/include/streambuf
index 86070659a4b4..12eded5c4d31 100644
--- a/contrib/libc++/include/streambuf
+++ b/contrib/libc++/include/streambuf
@@ -476,11 +476,13 @@ basic_streambuf<_CharT, _Traits>::overflow(int_type)
return traits_type::eof();
}
+#ifndef _LIBCPP_AVAILABILITY_NO_STREAMS_EXTERN_TEMPLATE
_LIBCPP_EXTERN_TEMPLATE(class _LIBCPP_EXTERN_TEMPLATE_TYPE_VIS basic_streambuf<char>)
_LIBCPP_EXTERN_TEMPLATE(class _LIBCPP_EXTERN_TEMPLATE_TYPE_VIS basic_streambuf<wchar_t>)
_LIBCPP_EXTERN_TEMPLATE(class _LIBCPP_EXTERN_TEMPLATE_TYPE_VIS basic_ios<char>)
_LIBCPP_EXTERN_TEMPLATE(class _LIBCPP_EXTERN_TEMPLATE_TYPE_VIS basic_ios<wchar_t>)
+#endif
_LIBCPP_END_NAMESPACE_STD
diff --git a/contrib/libc++/include/typeinfo b/contrib/libc++/include/typeinfo
index 4145ac1a3737..8624b349764a 100644
--- a/contrib/libc++/include/typeinfo
+++ b/contrib/libc++/include/typeinfo
@@ -108,6 +108,7 @@ protected:
#endif
public:
+ _LIBCPP_AVAILABILITY_TYPEINFO_VTABLE
virtual ~type_info();
#if defined(_LIBCPP_HAS_NONUNIQUE_TYPEINFO)
diff --git a/contrib/libc++/src/exception.cpp b/contrib/libc++/src/exception.cpp
index 0b502cd134c0..4359d126173a 100644
--- a/contrib/libc++/src/exception.cpp
+++ b/contrib/libc++/src/exception.cpp
@@ -20,7 +20,7 @@
#if defined(_LIBCPP_ABI_MICROSOFT)
#include "support/runtime/exception_msvc.ipp"
-#include "support/runtime/exception_pointer_unimplemented.ipp"
+#include "support/runtime/exception_pointer_msvc.ipp"
#elif defined(_LIBCPPABI_VERSION)
#include "support/runtime/exception_libcxxabi.ipp"
#include "support/runtime/exception_pointer_cxxabi.ipp"
diff --git a/contrib/libc++/src/experimental/filesystem/operations.cpp b/contrib/libc++/src/experimental/filesystem/operations.cpp
index bd7685819eca..2856ae453a6b 100644
--- a/contrib/libc++/src/experimental/filesystem/operations.cpp
+++ b/contrib/libc++/src/experimental/filesystem/operations.cpp
@@ -513,8 +513,8 @@ bool checked_set(CType* out, ChronoType time) {
return true;
}
-using TimeSpec = struct ::timespec;
-using StatT = struct ::stat;
+using TimeSpec = struct timespec;
+using StatT = struct stat;
#if defined(__APPLE__)
TimeSpec extract_mtime(StatT const& st) { return st.st_mtimespec; }
diff --git a/contrib/libc++/src/locale.cpp b/contrib/libc++/src/locale.cpp
index 1460f9662e35..1ed9b41fd4ad 100644
--- a/contrib/libc++/src/locale.cpp
+++ b/contrib/libc++/src/locale.cpp
@@ -68,8 +68,8 @@ T&
make(A0 a0)
{
static typename aligned_storage<sizeof(T)>::type buf;
- ::new (&buf) T(a0);
- return *reinterpret_cast<T*>(&buf);
+ auto *obj = ::new (&buf) T(a0);
+ return *obj;
}
template <class T, class A0, class A1>
@@ -88,8 +88,8 @@ T&
make(A0 a0, A1 a1, A2 a2)
{
static typename aligned_storage<sizeof(T)>::type buf;
- ::new (&buf) T(a0, a1, a2);
- return *reinterpret_cast<T*>(&buf);
+ auto *obj = ::new (&buf) T(a0, a1, a2);
+ return *obj;
}
template <typename T, size_t N>
@@ -480,8 +480,8 @@ locale::__imp::make_global()
{
// only one thread can get in here and it only gets in once
static aligned_storage<sizeof(locale)>::type buf;
- ::new (&buf) locale(locale::classic());
- return *reinterpret_cast<locale*>(&buf);
+ auto *obj = ::new (&buf) locale(locale::classic());
+ return *obj;
}
locale&
diff --git a/contrib/libc++/src/memory.cpp b/contrib/libc++/src/memory.cpp
index f6f6fe8da628..4e0d3af9167e 100644
--- a/contrib/libc++/src/memory.cpp
+++ b/contrib/libc++/src/memory.cpp
@@ -120,7 +120,7 @@ __shared_weak_count::lock() _NOEXCEPT
object_owners+1))
return this;
}
- return 0;
+ return nullptr;
}
#if !defined(_LIBCPP_NO_RTTI) || !defined(_LIBCPP_BUILD_STATIC)
@@ -128,7 +128,7 @@ __shared_weak_count::lock() _NOEXCEPT
const void*
__shared_weak_count::__get_deleter(const type_info&) const _NOEXCEPT
{
- return 0;
+ return nullptr;
}
#endif // _LIBCPP_NO_RTTI
@@ -154,7 +154,7 @@ __sp_mut::lock() _NOEXCEPT
{
auto m = static_cast<__libcpp_mutex_t*>(__lx);
unsigned count = 0;
- while (__libcpp_mutex_trylock(m) != 0)
+ while (!__libcpp_mutex_trylock(m))
{
if (++count > 16)
{
diff --git a/contrib/libc++/src/support/runtime/exception_pointer_msvc.ipp b/contrib/libc++/src/support/runtime/exception_pointer_msvc.ipp
new file mode 100644
index 000000000000..a8cd0e8d304d
--- /dev/null
+++ b/contrib/libc++/src/support/runtime/exception_pointer_msvc.ipp
@@ -0,0 +1,94 @@
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is dual licensed under the MIT and the University of Illinois Open
+// Source Licenses. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include <stdio.h>
+#include <stdlib.h>
+
+_CRTIMP2_PURE void __CLRCALL_PURE_OR_CDECL __ExceptionPtrCreate(_Out_ void*);
+_CRTIMP2_PURE void __CLRCALL_PURE_OR_CDECL __ExceptionPtrDestroy(_Inout_ void*);
+_CRTIMP2_PURE void __CLRCALL_PURE_OR_CDECL __ExceptionPtrCopy(_Out_ void*,
+ _In_ const void*);
+_CRTIMP2_PURE void __CLRCALL_PURE_OR_CDECL
+__ExceptionPtrAssign(_Inout_ void*, _In_ const void*);
+_CRTIMP2_PURE bool __CLRCALL_PURE_OR_CDECL
+__ExceptionPtrCompare(_In_ const void*, _In_ const void*);
+_CRTIMP2_PURE bool __CLRCALL_PURE_OR_CDECL
+__ExceptionPtrToBool(_In_ const void*);
+_CRTIMP2_PURE void __CLRCALL_PURE_OR_CDECL __ExceptionPtrSwap(_Inout_ void*,
+ _Inout_ void*);
+_CRTIMP2_PURE void __CLRCALL_PURE_OR_CDECL
+__ExceptionPtrCurrentException(_Out_ void*);
+[[noreturn]] _CRTIMP2_PURE void __CLRCALL_PURE_OR_CDECL
+__ExceptionPtrRethrow(_In_ const void*);
+_CRTIMP2_PURE void __CLRCALL_PURE_OR_CDECL
+__ExceptionPtrCopyException(_Inout_ void*, _In_ const void*, _In_ const void*);
+
+namespace std {
+
+exception_ptr::exception_ptr() _NOEXCEPT { __ExceptionPtrCreate(this); }
+exception_ptr::exception_ptr(nullptr_t) _NOEXCEPT { __ExceptionPtrCreate(this); }
+
+exception_ptr::exception_ptr(const exception_ptr& __other) _NOEXCEPT {
+ __ExceptionPtrCopy(this, &__other);
+}
+exception_ptr& exception_ptr::operator=(const exception_ptr& __other) _NOEXCEPT {
+ __ExceptionPtrAssign(this, &__other);
+ return *this;
+}
+
+exception_ptr& exception_ptr::operator=(nullptr_t) _NOEXCEPT {
+ exception_ptr dummy;
+ __ExceptionPtrAssign(this, &dummy);
+ return *this;
+}
+
+exception_ptr::~exception_ptr() _NOEXCEPT { __ExceptionPtrDestroy(this); }
+
+exception_ptr::operator bool() const _NOEXCEPT {
+ return __ExceptionPtrToBool(this);
+}
+
+bool operator==(const exception_ptr& __x, const exception_ptr& __y) _NOEXCEPT {
+ return __ExceptionPtrCompare(&__x, &__y);
+}
+
+
+void swap(exception_ptr& lhs, exception_ptr& rhs) _NOEXCEPT {
+ __ExceptionPtrSwap(&rhs, &lhs);
+}
+
+exception_ptr __copy_exception_ptr(void* __except, const void* __ptr) {
+ exception_ptr __ret = nullptr;
+ if (__ptr)
+ __ExceptionPtrCopyException(&__ret, __except, __ptr);
+ return __ret;
+}
+
+exception_ptr current_exception() _NOEXCEPT {
+ exception_ptr __ret;
+ __ExceptionPtrCurrentException(&__ret);
+ return __ret;
+}
+
+_LIBCPP_NORETURN
+void rethrow_exception(exception_ptr p) { __ExceptionPtrRethrow(&p); }
+
+nested_exception::nested_exception() _NOEXCEPT : __ptr_(current_exception()) {}
+
+nested_exception::~nested_exception() _NOEXCEPT {}
+
+_LIBCPP_NORETURN
+void nested_exception::rethrow_nested() const {
+ if (__ptr_ == nullptr)
+ terminate();
+ rethrow_exception(__ptr_);
+}
+
+} // namespace std
diff --git a/contrib/llvm/include/llvm/ADT/APInt.h b/contrib/llvm/include/llvm/ADT/APInt.h
index 63c92c1a7fce..c3822e35906a 100644
--- a/contrib/llvm/include/llvm/ADT/APInt.h
+++ b/contrib/llvm/include/llvm/ADT/APInt.h
@@ -842,6 +842,7 @@ public:
///
/// \returns *this
APInt &operator*=(const APInt &RHS);
+ APInt &operator*=(uint64_t RHS);
/// \brief Addition assignment operator.
///
@@ -2043,6 +2044,16 @@ inline APInt operator-(uint64_t LHS, APInt b) {
return b;
}
+inline APInt operator*(APInt a, uint64_t RHS) {
+ a *= RHS;
+ return a;
+}
+
+inline APInt operator*(uint64_t LHS, APInt b) {
+ b *= LHS;
+ return b;
+}
+
namespace APIntOps {
diff --git a/contrib/llvm/include/llvm/ADT/BitVector.h b/contrib/llvm/include/llvm/ADT/BitVector.h
index 5aa101591e6e..e835f1516225 100644
--- a/contrib/llvm/include/llvm/ADT/BitVector.h
+++ b/contrib/llvm/include/llvm/ADT/BitVector.h
@@ -217,7 +217,7 @@ public:
unsigned BitPos = Prev % BITWORD_SIZE;
BitWord Copy = Bits[WordPos];
// Mask off previous bits.
- Copy &= ~0UL << BitPos;
+ Copy &= maskTrailingZeros<BitWord>(BitPos);
if (Copy != 0)
return WordPos * BITWORD_SIZE + countTrailingZeros(Copy);
@@ -229,7 +229,7 @@ public:
return -1;
}
- /// find_next_unset - Returns the index of the next usnet bit following the
+ /// find_next_unset - Returns the index of the next unset bit following the
/// "Prev" bit. Returns -1 if all remaining bits are set.
int find_next_unset(unsigned Prev) const {
++Prev;
@@ -253,7 +253,34 @@ public:
return -1;
}
- /// clear - Clear all bits.
+ /// find_prev - Returns the index of the first set bit that precedes the
+ /// the bit at \p PriorTo. Returns -1 if all previous bits are unset.
+ int find_prev(unsigned PriorTo) {
+ if (PriorTo == 0)
+ return -1;
+
+ --PriorTo;
+
+ unsigned WordPos = PriorTo / BITWORD_SIZE;
+ unsigned BitPos = PriorTo % BITWORD_SIZE;
+ BitWord Copy = Bits[WordPos];
+ // Mask off next bits.
+ Copy &= maskTrailingOnes<BitWord>(BitPos + 1);
+
+ if (Copy != 0)
+ return (WordPos + 1) * BITWORD_SIZE - countLeadingZeros(Copy) - 1;
+
+ // Check previous words.
+ for (unsigned i = 1; i <= WordPos; ++i) {
+ unsigned Index = WordPos - i;
+ if (Bits[Index] == 0)
+ continue;
+ return (Index + 1) * BITWORD_SIZE - countLeadingZeros(Bits[Index]) - 1;
+ }
+ return -1;
+ }
+
+ /// clear - Removes all bits from the bitvector. Does not change capacity.
void clear() {
Size = 0;
}
diff --git a/contrib/llvm/include/llvm/ADT/SmallBitVector.h b/contrib/llvm/include/llvm/ADT/SmallBitVector.h
index bf16af5933f0..0eeacc162543 100644
--- a/contrib/llvm/include/llvm/ADT/SmallBitVector.h
+++ b/contrib/llvm/include/llvm/ADT/SmallBitVector.h
@@ -278,6 +278,24 @@ public:
return getPointer()->find_next_unset(Prev);
}
+ /// find_prev - Returns the index of the first set bit that precedes the
+ /// the bit at \p PriorTo. Returns -1 if all previous bits are unset.
+ int find_prev(unsigned PriorTo) const {
+ if (isSmall()) {
+ if (PriorTo == 0)
+ return -1;
+
+ --PriorTo;
+ uintptr_t Bits = getSmallBits();
+ Bits &= maskTrailingOnes<uintptr_t>(PriorTo + 1);
+ if (Bits == 0)
+ return -1;
+
+ return NumBaseBits - countLeadingZeros(Bits) - 1;
+ }
+ return getPointer()->find_prev(PriorTo);
+ }
+
/// Clear all bits.
void clear() {
if (!isSmall())
diff --git a/contrib/llvm/include/llvm/Analysis/LoopInfoImpl.h b/contrib/llvm/include/llvm/Analysis/LoopInfoImpl.h
index 66c9f68afc60..249fa572c024 100644
--- a/contrib/llvm/include/llvm/Analysis/LoopInfoImpl.h
+++ b/contrib/llvm/include/llvm/Analysis/LoopInfoImpl.h
@@ -220,8 +220,8 @@ void LoopBase<BlockT, LoopT>::verifyLoop() const {
BI = df_ext_begin(getHeader(), VisitSet),
BE = df_ext_end(getHeader(), VisitSet);
- // Keep track of the number of BBs visited.
- unsigned NumVisited = 0;
+ // Keep track of the BBs visited.
+ SmallPtrSet<BlockT*, 8> VisitedBBs;
// Check the individual blocks.
for ( ; BI != BE; ++BI) {
@@ -259,10 +259,18 @@ void LoopBase<BlockT, LoopT>::verifyLoop() const {
assert(BB != &getHeader()->getParent()->front() &&
"Loop contains function entry block!");
- NumVisited++;
+ VisitedBBs.insert(BB);
}
- assert(NumVisited == getNumBlocks() && "Unreachable block in loop");
+ if (VisitedBBs.size() != getNumBlocks()) {
+ dbgs() << "The following blocks are unreachable in the loop: ";
+ for (auto BB : Blocks) {
+ if (!VisitedBBs.count(BB)) {
+ dbgs() << *BB << "\n";
+ }
+ }
+ assert(false && "Unreachable block in loop");
+ }
// Check the subloops.
for (iterator I = begin(), E = end(); I != E; ++I)
diff --git a/contrib/llvm/include/llvm/Analysis/ProfileSummaryInfo.h b/contrib/llvm/include/llvm/Analysis/ProfileSummaryInfo.h
index 1aec35c3e677..75c4cbd03706 100644
--- a/contrib/llvm/include/llvm/Analysis/ProfileSummaryInfo.h
+++ b/contrib/llvm/include/llvm/Analysis/ProfileSummaryInfo.h
@@ -54,6 +54,18 @@ public:
ProfileSummaryInfo(Module &M) : M(M) {}
ProfileSummaryInfo(ProfileSummaryInfo &&Arg)
: M(Arg.M), Summary(std::move(Arg.Summary)) {}
+
+ /// Handle the invalidation of this information.
+ ///
+ /// When used as a result of \c ProfileSummaryAnalysis this method will be
+ /// called when the module this was computed for changes. Since profile
+ /// summary is immutable after it is annotated on the module, we return false
+ /// here.
+ bool invalidate(Module &, const PreservedAnalyses &,
+ ModuleAnalysisManager::Invalidator &) {
+ return false;
+ }
+
/// Returns the profile count for \p CallInst.
static Optional<uint64_t> getProfileCount(const Instruction *CallInst,
BlockFrequencyInfo *BFI);
diff --git a/contrib/llvm/include/llvm/Analysis/ScalarEvolution.h b/contrib/llvm/include/llvm/Analysis/ScalarEvolution.h
index 54bc4dcfd2cd..85350fa159d6 100644
--- a/contrib/llvm/include/llvm/Analysis/ScalarEvolution.h
+++ b/contrib/llvm/include/llvm/Analysis/ScalarEvolution.h
@@ -782,13 +782,13 @@ private:
/// Set the memoized range for the given SCEV.
const ConstantRange &setRange(const SCEV *S, RangeSignHint Hint,
- const ConstantRange &CR) {
+ ConstantRange &&CR) {
DenseMap<const SCEV *, ConstantRange> &Cache =
Hint == HINT_RANGE_UNSIGNED ? UnsignedRanges : SignedRanges;
- auto Pair = Cache.insert({S, CR});
+ auto Pair = Cache.try_emplace(S, std::move(CR));
if (!Pair.second)
- Pair.first->second = CR;
+ Pair.first->second = std::move(CR);
return Pair.first->second;
}
@@ -816,6 +816,10 @@ private:
/// Helper function called from createNodeForPHI.
const SCEV *createAddRecFromPHI(PHINode *PN);
+ /// A helper function for createAddRecFromPHI to handle simple cases.
+ const SCEV *createSimpleAffineAddRec(PHINode *PN, Value *BEValueV,
+ Value *StartValueV);
+
/// Helper function called from createNodeForPHI.
const SCEV *createNodeFromSelectLikePHI(PHINode *PN);
@@ -1565,7 +1569,7 @@ public:
/// delinearization).
void findArrayDimensions(SmallVectorImpl<const SCEV *> &Terms,
SmallVectorImpl<const SCEV *> &Sizes,
- const SCEV *ElementSize) const;
+ const SCEV *ElementSize);
void print(raw_ostream &OS) const;
void verify() const;
diff --git a/contrib/llvm/include/llvm/Analysis/TargetLibraryInfo.def b/contrib/llvm/include/llvm/Analysis/TargetLibraryInfo.def
index 637fc7ed30dd..099a3c7cf2ac 100644
--- a/contrib/llvm/include/llvm/Analysis/TargetLibraryInfo.def
+++ b/contrib/llvm/include/llvm/Analysis/TargetLibraryInfo.def
@@ -1115,6 +1115,9 @@ TLI_DEFINE_STRING_INTERNAL("vsprintf")
/// int vsscanf(const char *s, const char *format, va_list arg);
TLI_DEFINE_ENUM_INTERNAL(vsscanf)
TLI_DEFINE_STRING_INTERNAL("vsscanf")
+/// size_t wcslen (const wchar_t* wcs);
+TLI_DEFINE_ENUM_INTERNAL(wcslen)
+TLI_DEFINE_STRING_INTERNAL("wcslen")
/// ssize_t write(int fildes, const void *buf, size_t nbyte);
TLI_DEFINE_ENUM_INTERNAL(write)
TLI_DEFINE_STRING_INTERNAL("write")
diff --git a/contrib/llvm/include/llvm/CodeGen/AsmPrinter.h b/contrib/llvm/include/llvm/CodeGen/AsmPrinter.h
index fb8c8408fc77..180c0b579248 100644
--- a/contrib/llvm/include/llvm/CodeGen/AsmPrinter.h
+++ b/contrib/llvm/include/llvm/CodeGen/AsmPrinter.h
@@ -226,6 +226,7 @@ public:
FUNCTION_EXIT = 1,
TAIL_CALL = 2,
LOG_ARGS_ENTER = 3,
+ CUSTOM_EVENT = 4,
};
// The table will contain these structs that point to the sled, the function
@@ -242,7 +243,7 @@ public:
};
// All the sleds to be emitted.
- std::vector<XRayFunctionEntry> Sleds;
+ SmallVector<XRayFunctionEntry, 4> Sleds;
// Helper function to record a given XRay sled.
void recordSled(MCSymbol *Sled, const MachineInstr &MI, SledKind Kind);
diff --git a/contrib/llvm/include/llvm/CodeGen/FastISel.h b/contrib/llvm/include/llvm/CodeGen/FastISel.h
index 2abe3bb11556..57fa0c73d272 100644
--- a/contrib/llvm/include/llvm/CodeGen/FastISel.h
+++ b/contrib/llvm/include/llvm/CodeGen/FastISel.h
@@ -506,6 +506,7 @@ protected:
bool selectCast(const User *I, unsigned Opcode);
bool selectExtractValue(const User *I);
bool selectInsertValue(const User *I);
+ bool selectXRayCustomEvent(const CallInst *II);
private:
/// \brief Handle PHI nodes in successor blocks.
diff --git a/contrib/llvm/include/llvm/CodeGen/FunctionLoweringInfo.h b/contrib/llvm/include/llvm/CodeGen/FunctionLoweringInfo.h
index 14ee5019ef2f..e7544bd7b70c 100644
--- a/contrib/llvm/include/llvm/CodeGen/FunctionLoweringInfo.h
+++ b/contrib/llvm/include/llvm/CodeGen/FunctionLoweringInfo.h
@@ -249,7 +249,7 @@ public:
void AddLiveOutRegInfo(unsigned Reg, unsigned NumSignBits,
const KnownBits &Known) {
// Only install this information if it tells us something.
- if (NumSignBits == 1 && Known.Zero == 0 && Known.One == 0)
+ if (NumSignBits == 1 && Known.isUnknown())
return;
LiveOutRegInfo.grow(Reg);
diff --git a/contrib/llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h b/contrib/llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h
index 31ffdc0e2e78..e292e8913db0 100644
--- a/contrib/llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h
+++ b/contrib/llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h
@@ -78,7 +78,7 @@ private:
/// this function.
DenseMap<const AllocaInst *, int> FrameIndices;
- /// Methods for translating form LLVM IR to MachineInstr.
+ /// \name Methods for translating form LLVM IR to MachineInstr.
/// \see ::translate for general information on the translate methods.
/// @{
diff --git a/contrib/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h b/contrib/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
index 472f50576d96..6b662a7f7413 100644
--- a/contrib/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
+++ b/contrib/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
@@ -45,7 +45,7 @@ class MachineIRBuilder {
/// Debug location to be set to any instruction we create.
DebugLoc DL;
- /// Fields describing the insertion point.
+ /// \name Fields describing the insertion point.
/// @{
MachineBasicBlock *MBB;
MachineBasicBlock::iterator II;
@@ -84,7 +84,7 @@ public:
void setInsertPt(MachineBasicBlock &MBB, MachineBasicBlock::iterator II);
/// @}
- /// Setters for the insertion point.
+ /// \name Setters for the insertion point.
/// @{
/// Set the MachineFunction where to build instructions.
void setMF(MachineFunction &);
@@ -98,7 +98,7 @@ public:
void setInstr(MachineInstr &MI);
/// @}
- /// Control where instructions we create are recorded (typically for
+ /// \name Control where instructions we create are recorded (typically for
/// visiting again later during legalization).
/// @{
void recordInsertions(std::function<void(MachineInstr *)> InsertedInstr);
diff --git a/contrib/llvm/include/llvm/CodeGen/GlobalISel/RegBankSelect.h b/contrib/llvm/include/llvm/CodeGen/GlobalISel/RegBankSelect.h
index daa8dcf2061b..f610bc02b6f2 100644
--- a/contrib/llvm/include/llvm/CodeGen/GlobalISel/RegBankSelect.h
+++ b/contrib/llvm/include/llvm/CodeGen/GlobalISel/RegBankSelect.h
@@ -309,7 +309,7 @@ public:
Impossible
};
- /// Convenient types for a list of insertion points.
+ /// \name Convenient types for a list of insertion points.
/// @{
typedef SmallVector<std::unique_ptr<InsertPoint>, 2> InsertionPoints;
typedef InsertionPoints::iterator insertpt_iterator;
@@ -341,7 +341,7 @@ public:
const TargetRegisterInfo &TRI, Pass &P,
RepairingKind Kind = RepairingKind::Insert);
- /// Getters.
+ /// \name Getters.
/// @{
RepairingKind getKind() const { return Kind; }
unsigned getOpIdx() const { return OpIdx; }
@@ -349,7 +349,7 @@ public:
bool hasSplit() { return HasSplit; }
/// @}
- /// Overloaded methods to add an insertion point.
+ /// \name Overloaded methods to add an insertion point.
/// @{
/// Add a MBBInsertionPoint to the list of InsertPoints.
void addInsertPoint(MachineBasicBlock &MBB, bool Beginning);
@@ -362,7 +362,7 @@ public:
void addInsertPoint(InsertPoint &Point);
/// @}
- /// Accessors related to the insertion points.
+ /// \name Accessors related to the insertion points.
/// @{
insertpt_iterator begin() { return InsertPoints.begin(); }
insertpt_iterator end() { return InsertPoints.end(); }
@@ -561,7 +561,7 @@ private:
/// Find the best mapping for \p MI from \p PossibleMappings.
/// \return a reference on the best mapping in \p PossibleMappings.
- RegisterBankInfo::InstructionMapping &
+ const RegisterBankInfo::InstructionMapping &
findBestMapping(MachineInstr &MI,
RegisterBankInfo::InstructionMappings &PossibleMappings,
SmallVectorImpl<RepairingPlacement> &RepairPts);
diff --git a/contrib/llvm/include/llvm/CodeGen/GlobalISel/RegisterBankInfo.h b/contrib/llvm/include/llvm/CodeGen/GlobalISel/RegisterBankInfo.h
index 600733ac6a2d..f32233b3a9e4 100644
--- a/contrib/llvm/include/llvm/CodeGen/GlobalISel/RegisterBankInfo.h
+++ b/contrib/llvm/include/llvm/CodeGen/GlobalISel/RegisterBankInfo.h
@@ -264,7 +264,7 @@ public:
/// Convenient type to represent the alternatives for mapping an
/// instruction.
/// \todo When we move to TableGen this should be an array ref.
- typedef SmallVector<InstructionMapping, 4> InstructionMappings;
+ typedef SmallVector<const InstructionMapping *, 4> InstructionMappings;
/// Helper class used to get/create the virtual registers that will be used
/// to replace the MachineOperand when applying a mapping.
@@ -310,7 +310,7 @@ public:
OperandsMapper(MachineInstr &MI, const InstructionMapping &InstrMapping,
MachineRegisterInfo &MRI);
- /// Getters.
+ /// \name Getters.
/// @{
/// The MachineInstr being remapped.
MachineInstr &getMI() const { return MI; }
@@ -378,15 +378,23 @@ protected:
/// Keep dynamically allocated PartialMapping in a separate map.
/// This shouldn't be needed when everything gets TableGen'ed.
- mutable DenseMap<unsigned, std::unique_ptr<const PartialMapping>> MapOfPartialMappings;
+ mutable DenseMap<unsigned, std::unique_ptr<const PartialMapping>>
+ MapOfPartialMappings;
/// Keep dynamically allocated ValueMapping in a separate map.
/// This shouldn't be needed when everything gets TableGen'ed.
- mutable DenseMap<unsigned, std::unique_ptr<const ValueMapping> > MapOfValueMappings;
+ mutable DenseMap<unsigned, std::unique_ptr<const ValueMapping>>
+ MapOfValueMappings;
/// Keep dynamically allocated array of ValueMapping in a separate map.
/// This shouldn't be needed when everything gets TableGen'ed.
- mutable DenseMap<unsigned, std::unique_ptr<ValueMapping[]>> MapOfOperandsMappings;
+ mutable DenseMap<unsigned, std::unique_ptr<ValueMapping[]>>
+ MapOfOperandsMappings;
+
+ /// Keep dynamically allocated InstructionMapping in a separate map.
+ /// This shouldn't be needed when everything gets TableGen'ed.
+ mutable DenseMap<unsigned, std::unique_ptr<const InstructionMapping>>
+ MapOfInstructionMappings;
/// Create a RegisterBankInfo that can accomodate up to \p NumRegBanks
/// RegisterBank instances.
@@ -425,14 +433,14 @@ protected:
/// register, a register class, or a register bank.
/// In other words, this method will likely fail to find a mapping for
/// any generic opcode that has not been lowered by target specific code.
- InstructionMapping getInstrMappingImpl(const MachineInstr &MI) const;
+ const InstructionMapping &getInstrMappingImpl(const MachineInstr &MI) const;
/// Get the uniquely generated PartialMapping for the
/// given arguments.
const PartialMapping &getPartialMapping(unsigned StartIdx, unsigned Length,
const RegisterBank &RegBank) const;
- /// Methods to get a uniquely generated ValueMapping.
+ /// \name Methods to get a uniquely generated ValueMapping.
/// @{
/// The most common ValueMapping consists of a single PartialMapping.
@@ -445,7 +453,7 @@ protected:
unsigned NumBreakDowns) const;
/// @}
- /// Methods to get a uniquely generated array of ValueMapping.
+ /// \name Methods to get a uniquely generated array of ValueMapping.
/// @{
/// Get the uniquely generated array of ValueMapping for the
@@ -478,6 +486,33 @@ protected:
std::initializer_list<const ValueMapping *> OpdsMapping) const;
/// @}
+ /// \name Methods to get a uniquely generated InstructionMapping.
+ /// @{
+
+private:
+ /// Method to get a uniquely generated InstructionMapping.
+ const InstructionMapping &
+ getInstructionMappingImpl(bool IsInvalid, unsigned ID = InvalidMappingID,
+ unsigned Cost = 0,
+ const ValueMapping *OperandsMapping = nullptr,
+ unsigned NumOperands = 0) const;
+
+public:
+ /// Method to get a uniquely generated InstructionMapping.
+ const InstructionMapping &
+ getInstructionMapping(unsigned ID, unsigned Cost,
+ const ValueMapping *OperandsMapping,
+ unsigned NumOperands) const {
+ return getInstructionMappingImpl(/*IsInvalid*/ false, ID, Cost,
+ OperandsMapping, NumOperands);
+ }
+
+ /// Method to get a uniquely generated invalid InstructionMapping.
+ const InstructionMapping &getInvalidInstructionMapping() const {
+ return getInstructionMappingImpl(/*IsInvalid*/ true);
+ }
+ /// @}
+
/// Get the register bank for the \p OpIdx-th operand of \p MI form
/// the encoding constraints, if any.
///
@@ -603,7 +638,8 @@ public:
///
/// \note If returnedVal does not verify MI, this would probably mean
/// that the target does not support that instruction.
- virtual InstructionMapping getInstrMapping(const MachineInstr &MI) const;
+ virtual const InstructionMapping &
+ getInstrMapping(const MachineInstr &MI) const;
/// Get the alternative mappings for \p MI.
/// Alternative in the sense different from getInstrMapping.
diff --git a/contrib/llvm/lib/CodeGen/MIRPrinter.h b/contrib/llvm/include/llvm/CodeGen/MIRPrinter.h
index 16aa9038b6b2..c73adc3f2b11 100644
--- a/contrib/llvm/lib/CodeGen/MIRPrinter.h
+++ b/contrib/llvm/include/llvm/CodeGen/MIRPrinter.h
@@ -17,9 +17,11 @@
namespace llvm {
+class MachineBasicBlock;
class MachineFunction;
class Module;
class raw_ostream;
+template <typename T> class SmallVectorImpl;
/// Print LLVM IR using the MIR serialization format to the given output stream.
void printMIR(raw_ostream &OS, const Module &M);
@@ -28,6 +30,17 @@ void printMIR(raw_ostream &OS, const Module &M);
/// output stream.
void printMIR(raw_ostream &OS, const MachineFunction &MF);
+/// Determine a possible list of successors of a basic block based on the
+/// basic block machine operand being used inside the block. This should give
+/// you the correct list of successor blocks in most cases except for things
+/// like jump tables where the basic block references can't easily be found.
+/// The MIRPRinter will skip printing successors if they match the result of
+/// this funciton and the parser will use this function to construct a list if
+/// it is missing.
+void guessSuccessors(const MachineBasicBlock &MBB,
+ SmallVectorImpl<MachineBasicBlock*> &Successors,
+ bool &IsFallthrough);
+
} // end namespace llvm
#endif
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineFrameInfo.h b/contrib/llvm/include/llvm/CodeGen/MachineFrameInfo.h
index 61be9f775c97..689f3cd9fd12 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineFrameInfo.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineFrameInfo.h
@@ -520,6 +520,14 @@ public:
bool hasTailCall() const { return HasTailCall; }
void setHasTailCall() { HasTailCall = true; }
+ /// Computes the maximum size of a callframe and the AdjustsStack property.
+ /// This only works for targets defining
+ /// TargetInstrInfo::getCallFrameSetupOpcode(), getCallFrameDestroyOpcode(),
+ /// and getFrameSize().
+ /// This is usually computed by the prologue epilogue inserter but some
+ /// targets may call this to compute it earlier.
+ void computeMaxCallFrameSize(const MachineFunction &MF);
+
/// Return the maximum size of a call frame that must be
/// allocated for an outgoing function call. This is only available if
/// CallFrameSetup/Destroy pseudo instructions are used by the target, and
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineModuleInfo.h b/contrib/llvm/include/llvm/CodeGen/MachineModuleInfo.h
index 182d23ef3c90..f46ef41879d1 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineModuleInfo.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineModuleInfo.h
@@ -116,7 +116,7 @@ class MachineModuleInfo : public ImmutablePass {
// TODO: Ideally, what we'd like is to have a switch that allows emitting
// synchronous (precise at call-sites only) CFA into .eh_frame. However,
- // even under this switch, we'd like .debug_frame to be precise when using.
+ // even under this switch, we'd like .debug_frame to be precise when using
// -g. At this moment, there's no way to specify that some CFI directives
// go into .eh_frame only, while others go into .debug_frame only.
diff --git a/contrib/llvm/include/llvm/DebugInfo/CodeView/TypeDatabase.h b/contrib/llvm/include/llvm/DebugInfo/CodeView/TypeDatabase.h
index 220de4bf0ee4..be7b19e7df0c 100644
--- a/contrib/llvm/include/llvm/DebugInfo/CodeView/TypeDatabase.h
+++ b/contrib/llvm/include/llvm/DebugInfo/CodeView/TypeDatabase.h
@@ -21,7 +21,7 @@ namespace llvm {
namespace codeview {
class TypeDatabase {
public:
- TypeDatabase() : TypeNameStorage(Allocator) {}
+ explicit TypeDatabase(uint32_t ExpectedSize);
/// Gets the type index for the next type record.
TypeIndex getNextTypeIndex() const;
diff --git a/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFContext.h b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFContext.h
index b9f3425d5deb..3fae8b441439 100644
--- a/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFContext.h
+++ b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFContext.h
@@ -310,6 +310,11 @@ class DWARFContextInMemory : public DWARFContext {
StringRef *MapSectionToMember(StringRef Name);
+ /// If Sec is compressed section, decompresses and updates its contents
+ /// provided by Data. Otherwise leaves it unchanged.
+ Error maybeDecompress(const object::SectionRef &Sec, StringRef Name,
+ StringRef &Data);
+
public:
DWARFContextInMemory(const object::ObjectFile &Obj,
const LoadedObjectInfo *L = nullptr);
diff --git a/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFFormValue.h b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFFormValue.h
index 36b27228f5c6..f3516ebdecba 100644
--- a/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFFormValue.h
+++ b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFFormValue.h
@@ -39,20 +39,18 @@ public:
private:
struct ValueType {
- ValueType() {
- uval = 0;
- }
+ ValueType() { uval = 0; }
union {
uint64_t uval;
int64_t sval;
- const char* cstr;
+ const char *cstr;
};
- const uint8_t* data = nullptr;
+ const uint8_t *data = nullptr;
};
- dwarf::Form Form; // Form for this value.
- ValueType Value; // Contains all data for the form.
+ dwarf::Form Form; // Form for this value.
+ ValueType Value; // Contains all data for the form.
const DWARFUnit *U = nullptr; // Remember the DWARFUnit at extract time.
public:
@@ -84,7 +82,7 @@ public:
const DWARFUnit *U);
bool isInlinedCStr() const {
- return Value.data != nullptr && Value.data == (const uint8_t*)Value.cstr;
+ return Value.data != nullptr && Value.data == (const uint8_t *)Value.cstr;
}
/// getAsFoo functions below return the extracted value as Foo if only
@@ -135,45 +133,45 @@ public:
uint8_t AddrSize,
llvm::dwarf::DwarfFormat Format);
- /// Skip a form in \p debug_info_data at offset specified by \p offset_ptr.
+ /// Skip a form in \p DebugInfoData at offset specified by \p OffsetPtr.
///
/// Skips the bytes for this form in the debug info and updates the offset.
///
- /// \param debug_info_data the .debug_info data to use to skip the value.
- /// \param offset_ptr a reference to the offset that will be updated.
+ /// \param DebugInfoData the .debug_info data to use to skip the value.
+ /// \param OffsetPtr a reference to the offset that will be updated.
/// \param U the DWARFUnit to use when skipping the form in case the form
/// size differs according to data in the DWARFUnit.
/// \returns true on success, false if the form was not skipped.
- bool skipValue(DataExtractor debug_info_data, uint32_t *offset_ptr,
+ bool skipValue(DataExtractor DebugInfoData, uint32_t *OffsetPtr,
const DWARFUnit *U) const;
- /// Skip a form in \p debug_info_data at offset specified by \p offset_ptr.
+ /// Skip a form in \p DebugInfoData at offset specified by \p OffsetPtr.
///
/// Skips the bytes for this form in the debug info and updates the offset.
///
- /// \param form the DW_FORM enumeration that indicates the form to skip.
- /// \param debug_info_data the .debug_info data to use to skip the value.
- /// \param offset_ptr a reference to the offset that will be updated.
+ /// \param Form the DW_FORM enumeration that indicates the form to skip.
+ /// \param DebugInfoData the .debug_info data to use to skip the value.
+ /// \param OffsetPtr a reference to the offset that will be updated.
/// \param U the DWARFUnit to use when skipping the form in case the form
/// size differs according to data in the DWARFUnit.
/// \returns true on success, false if the form was not skipped.
- static bool skipValue(dwarf::Form form, DataExtractor debug_info_data,
- uint32_t *offset_ptr, const DWARFUnit *U);
+ static bool skipValue(dwarf::Form Form, DataExtractor DebugInfoData,
+ uint32_t *OffsetPtr, const DWARFUnit *U);
- /// Skip a form in \p debug_info_data at offset specified by \p offset_ptr.
+ /// Skip a form in \p DebugInfoData at offset specified by \p OffsetPtr.
///
/// Skips the bytes for this form in the debug info and updates the offset.
///
- /// \param form the DW_FORM enumeration that indicates the form to skip.
- /// \param debug_info_data the .debug_info data to use to skip the value.
- /// \param offset_ptr a reference to the offset that will be updated.
+ /// \param Form the DW_FORM enumeration that indicates the form to skip.
+ /// \param DebugInfoData the .debug_info data to use to skip the value.
+ /// \param OffsetPtr a reference to the offset that will be updated.
/// \param Version DWARF version number.
/// \param AddrSize size of an address in bytes.
/// \param Format enum value from llvm::dwarf::DwarfFormat.
/// \returns true on success, false if the form was not skipped.
- static bool skipValue(dwarf::Form form, DataExtractor debug_info_data,
- uint32_t *offset_ptr, uint16_t Version,
- uint8_t AddrSize, llvm::dwarf::DwarfFormat Format);
+ static bool skipValue(dwarf::Form Form, DataExtractor DebugInfoData,
+ uint32_t *OffsetPtr, uint16_t Version, uint8_t AddrSize,
+ llvm::dwarf::DwarfFormat Format);
private:
void dumpString(raw_ostream &OS) const;
@@ -181,149 +179,146 @@ private:
namespace dwarf {
- /// Take an optional DWARFFormValue and try to extract a string value from it.
- ///
- /// \param V and optional DWARFFormValue to attempt to extract the value from.
- /// \returns an optional value that contains a value if the form value
- /// was valid and was a string.
- inline Optional<const char*> toString(const Optional<DWARFFormValue>& V) {
- if (V)
- return V->getAsCString();
- return None;
- }
-
- /// Take an optional DWARFFormValue and extract a string value from it.
- ///
- /// \param V and optional DWARFFormValue to attempt to extract the value from.
- /// \param Default the default value to return in case of failure.
- /// \returns the string value or Default if the V doesn't have a value or the
- /// form value's encoding wasn't a string.
- inline const char*
- toString(const Optional<DWARFFormValue>& V, const char *Default) {
- return toString(V).getValueOr(Default);
- }
-
- /// Take an optional DWARFFormValue and try to extract an unsigned constant.
- ///
- /// \param V and optional DWARFFormValue to attempt to extract the value from.
- /// \returns an optional value that contains a value if the form value
- /// was valid and has a unsigned constant form.
- inline Optional<uint64_t> toUnsigned(const Optional<DWARFFormValue>& V) {
- if (V)
- return V->getAsUnsignedConstant();
- return None;
- }
-
- /// Take an optional DWARFFormValue and extract a unsigned constant.
- ///
- /// \param V and optional DWARFFormValue to attempt to extract the value from.
- /// \param Default the default value to return in case of failure.
- /// \returns the extracted unsigned value or Default if the V doesn't have a
- /// value or the form value's encoding wasn't an unsigned constant form.
- inline uint64_t
- toUnsigned(const Optional<DWARFFormValue>& V, uint64_t Default) {
- return toUnsigned(V).getValueOr(Default);
- }
-
- /// Take an optional DWARFFormValue and try to extract an reference.
- ///
- /// \param V and optional DWARFFormValue to attempt to extract the value from.
- /// \returns an optional value that contains a value if the form value
- /// was valid and has a reference form.
- inline Optional<uint64_t> toReference(const Optional<DWARFFormValue>& V) {
- if (V)
- return V->getAsReference();
- return None;
- }
-
- /// Take an optional DWARFFormValue and extract a reference.
- ///
- /// \param V and optional DWARFFormValue to attempt to extract the value from.
- /// \param Default the default value to return in case of failure.
- /// \returns the extracted reference value or Default if the V doesn't have a
- /// value or the form value's encoding wasn't a reference form.
- inline uint64_t
- toReference(const Optional<DWARFFormValue>& V, uint64_t Default) {
- return toReference(V).getValueOr(Default);
- }
-
- /// Take an optional DWARFFormValue and try to extract an signed constant.
- ///
- /// \param V and optional DWARFFormValue to attempt to extract the value from.
- /// \returns an optional value that contains a value if the form value
- /// was valid and has a signed constant form.
- inline Optional<int64_t> toSigned(const Optional<DWARFFormValue>& V) {
- if (V)
- return V->getAsSignedConstant();
- return None;
- }
-
- /// Take an optional DWARFFormValue and extract a signed integer.
- ///
- /// \param V and optional DWARFFormValue to attempt to extract the value from.
- /// \param Default the default value to return in case of failure.
- /// \returns the extracted signed integer value or Default if the V doesn't
- /// have a value or the form value's encoding wasn't a signed integer form.
- inline int64_t
- toSigned(const Optional<DWARFFormValue>& V, int64_t Default) {
- return toSigned(V).getValueOr(Default);
- }
-
- /// Take an optional DWARFFormValue and try to extract an address.
- ///
- /// \param V and optional DWARFFormValue to attempt to extract the value from.
- /// \returns an optional value that contains a value if the form value
- /// was valid and has a address form.
- inline Optional<uint64_t> toAddress(const Optional<DWARFFormValue>& V) {
- if (V)
- return V->getAsAddress();
- return None;
- }
-
- /// Take an optional DWARFFormValue and extract a address.
- ///
- /// \param V and optional DWARFFormValue to attempt to extract the value from.
- /// \param Default the default value to return in case of failure.
- /// \returns the extracted address value or Default if the V doesn't have a
- /// value or the form value's encoding wasn't an address form.
- inline uint64_t
- toAddress(const Optional<DWARFFormValue>& V, uint64_t Default) {
- return toAddress(V).getValueOr(Default);
- }
-
- /// Take an optional DWARFFormValue and try to extract an section offset.
- ///
- /// \param V and optional DWARFFormValue to attempt to extract the value from.
- /// \returns an optional value that contains a value if the form value
- /// was valid and has a section offset form.
- inline Optional<uint64_t> toSectionOffset(const Optional<DWARFFormValue>& V) {
- if (V)
- return V->getAsSectionOffset();
- return None;
- }
-
- /// Take an optional DWARFFormValue and extract a section offset.
- ///
- /// \param V and optional DWARFFormValue to attempt to extract the value from.
- /// \param Default the default value to return in case of failure.
- /// \returns the extracted section offset value or Default if the V doesn't
- /// have a value or the form value's encoding wasn't a section offset form.
- inline uint64_t
- toSectionOffset(const Optional<DWARFFormValue>& V, uint64_t Default) {
- return toSectionOffset(V).getValueOr(Default);
- }
-
- /// Take an optional DWARFFormValue and try to extract block data.
- ///
- /// \param V and optional DWARFFormValue to attempt to extract the value from.
- /// \returns an optional value that contains a value if the form value
- /// was valid and has a block form.
- inline Optional<ArrayRef<uint8_t>>
- toBlock(const Optional<DWARFFormValue>& V) {
- if (V)
- return V->getAsBlock();
- return None;
- }
+/// Take an optional DWARFFormValue and try to extract a string value from it.
+///
+/// \param V and optional DWARFFormValue to attempt to extract the value from.
+/// \returns an optional value that contains a value if the form value
+/// was valid and was a string.
+inline Optional<const char *> toString(const Optional<DWARFFormValue> &V) {
+ if (V)
+ return V->getAsCString();
+ return None;
+}
+
+/// Take an optional DWARFFormValue and extract a string value from it.
+///
+/// \param V and optional DWARFFormValue to attempt to extract the value from.
+/// \param Default the default value to return in case of failure.
+/// \returns the string value or Default if the V doesn't have a value or the
+/// form value's encoding wasn't a string.
+inline const char *toString(const Optional<DWARFFormValue> &V,
+ const char *Default) {
+ return toString(V).getValueOr(Default);
+}
+
+/// Take an optional DWARFFormValue and try to extract an unsigned constant.
+///
+/// \param V and optional DWARFFormValue to attempt to extract the value from.
+/// \returns an optional value that contains a value if the form value
+/// was valid and has a unsigned constant form.
+inline Optional<uint64_t> toUnsigned(const Optional<DWARFFormValue> &V) {
+ if (V)
+ return V->getAsUnsignedConstant();
+ return None;
+}
+
+/// Take an optional DWARFFormValue and extract a unsigned constant.
+///
+/// \param V and optional DWARFFormValue to attempt to extract the value from.
+/// \param Default the default value to return in case of failure.
+/// \returns the extracted unsigned value or Default if the V doesn't have a
+/// value or the form value's encoding wasn't an unsigned constant form.
+inline uint64_t toUnsigned(const Optional<DWARFFormValue> &V,
+ uint64_t Default) {
+ return toUnsigned(V).getValueOr(Default);
+}
+
+/// Take an optional DWARFFormValue and try to extract an reference.
+///
+/// \param V and optional DWARFFormValue to attempt to extract the value from.
+/// \returns an optional value that contains a value if the form value
+/// was valid and has a reference form.
+inline Optional<uint64_t> toReference(const Optional<DWARFFormValue> &V) {
+ if (V)
+ return V->getAsReference();
+ return None;
+}
+
+/// Take an optional DWARFFormValue and extract a reference.
+///
+/// \param V and optional DWARFFormValue to attempt to extract the value from.
+/// \param Default the default value to return in case of failure.
+/// \returns the extracted reference value or Default if the V doesn't have a
+/// value or the form value's encoding wasn't a reference form.
+inline uint64_t toReference(const Optional<DWARFFormValue> &V,
+ uint64_t Default) {
+ return toReference(V).getValueOr(Default);
+}
+
+/// Take an optional DWARFFormValue and try to extract an signed constant.
+///
+/// \param V and optional DWARFFormValue to attempt to extract the value from.
+/// \returns an optional value that contains a value if the form value
+/// was valid and has a signed constant form.
+inline Optional<int64_t> toSigned(const Optional<DWARFFormValue> &V) {
+ if (V)
+ return V->getAsSignedConstant();
+ return None;
+}
+
+/// Take an optional DWARFFormValue and extract a signed integer.
+///
+/// \param V and optional DWARFFormValue to attempt to extract the value from.
+/// \param Default the default value to return in case of failure.
+/// \returns the extracted signed integer value or Default if the V doesn't
+/// have a value or the form value's encoding wasn't a signed integer form.
+inline int64_t toSigned(const Optional<DWARFFormValue> &V, int64_t Default) {
+ return toSigned(V).getValueOr(Default);
+}
+
+/// Take an optional DWARFFormValue and try to extract an address.
+///
+/// \param V and optional DWARFFormValue to attempt to extract the value from.
+/// \returns an optional value that contains a value if the form value
+/// was valid and has a address form.
+inline Optional<uint64_t> toAddress(const Optional<DWARFFormValue> &V) {
+ if (V)
+ return V->getAsAddress();
+ return None;
+}
+
+/// Take an optional DWARFFormValue and extract a address.
+///
+/// \param V and optional DWARFFormValue to attempt to extract the value from.
+/// \param Default the default value to return in case of failure.
+/// \returns the extracted address value or Default if the V doesn't have a
+/// value or the form value's encoding wasn't an address form.
+inline uint64_t toAddress(const Optional<DWARFFormValue> &V, uint64_t Default) {
+ return toAddress(V).getValueOr(Default);
+}
+
+/// Take an optional DWARFFormValue and try to extract an section offset.
+///
+/// \param V and optional DWARFFormValue to attempt to extract the value from.
+/// \returns an optional value that contains a value if the form value
+/// was valid and has a section offset form.
+inline Optional<uint64_t> toSectionOffset(const Optional<DWARFFormValue> &V) {
+ if (V)
+ return V->getAsSectionOffset();
+ return None;
+}
+
+/// Take an optional DWARFFormValue and extract a section offset.
+///
+/// \param V and optional DWARFFormValue to attempt to extract the value from.
+/// \param Default the default value to return in case of failure.
+/// \returns the extracted section offset value or Default if the V doesn't
+/// have a value or the form value's encoding wasn't a section offset form.
+inline uint64_t toSectionOffset(const Optional<DWARFFormValue> &V,
+ uint64_t Default) {
+ return toSectionOffset(V).getValueOr(Default);
+}
+
+/// Take an optional DWARFFormValue and try to extract block data.
+///
+/// \param V and optional DWARFFormValue to attempt to extract the value from.
+/// \returns an optional value that contains a value if the form value
+/// was valid and has a block form.
+inline Optional<ArrayRef<uint8_t>> toBlock(const Optional<DWARFFormValue> &V) {
+ if (V)
+ return V->getAsBlock();
+ return None;
+}
} // end namespace dwarf
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/Native/DbiModuleDescriptor.h b/contrib/llvm/include/llvm/DebugInfo/PDB/Native/DbiModuleDescriptor.h
index d1f791b9daed..7e77f5a3eef9 100644
--- a/contrib/llvm/include/llvm/DebugInfo/PDB/Native/DbiModuleDescriptor.h
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/Native/DbiModuleDescriptor.h
@@ -53,14 +53,6 @@ private:
const ModuleInfoHeader *Layout = nullptr;
};
-struct ModuleInfoEx {
- ModuleInfoEx(const DbiModuleDescriptor &Info) : Info(Info) {}
- ModuleInfoEx(const ModuleInfoEx &Ex) = default;
-
- DbiModuleDescriptor Info;
- std::vector<StringRef> SourceFiles;
-};
-
} // end namespace pdb
template <> struct VarStreamArrayExtractor<pdb::DbiModuleDescriptor> {
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/Native/DbiModuleList.h b/contrib/llvm/include/llvm/DebugInfo/PDB/Native/DbiModuleList.h
new file mode 100644
index 000000000000..bcf1cff8f6e5
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/Native/DbiModuleList.h
@@ -0,0 +1,114 @@
+//===- DbiModuleList.h - PDB module information list ------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_RAW_DBIMODULELIST_H
+#define LLVM_DEBUGINFO_PDB_RAW_DBIMODULELIST_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/iterator.h"
+#include "llvm/DebugInfo/PDB/Native/DbiModuleDescriptor.h"
+#include "llvm/Support/BinaryStreamArray.h"
+#include "llvm/Support/BinaryStreamRef.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/Error.h"
+#include <cstdint>
+#include <vector>
+
+namespace llvm {
+namespace pdb {
+
+class DbiModuleList;
+struct FileInfoSubstreamHeader;
+
+class DbiModuleSourceFilesIterator
+ : public iterator_facade_base<DbiModuleSourceFilesIterator,
+ std::random_access_iterator_tag, StringRef> {
+ typedef iterator_facade_base<DbiModuleSourceFilesIterator,
+ std::random_access_iterator_tag, StringRef>
+ BaseType;
+
+public:
+ DbiModuleSourceFilesIterator(const DbiModuleList &Modules, uint32_t Modi,
+ uint16_t Filei);
+ DbiModuleSourceFilesIterator() = default;
+ DbiModuleSourceFilesIterator &
+ operator=(const DbiModuleSourceFilesIterator &R) = default;
+
+ bool operator==(const DbiModuleSourceFilesIterator &R) const;
+
+ const StringRef &operator*() const { return ThisValue; }
+ StringRef &operator*() { return ThisValue; }
+
+ bool operator<(const DbiModuleSourceFilesIterator &RHS) const;
+ std::ptrdiff_t operator-(const DbiModuleSourceFilesIterator &R) const;
+ DbiModuleSourceFilesIterator &operator+=(std::ptrdiff_t N);
+ DbiModuleSourceFilesIterator &operator-=(std::ptrdiff_t N);
+
+private:
+ void setValue();
+
+ bool isEnd() const;
+ bool isCompatible(const DbiModuleSourceFilesIterator &R) const;
+ bool isUniversalEnd() const;
+
+ StringRef ThisValue;
+ const DbiModuleList *Modules{nullptr};
+ uint32_t Modi{0};
+ uint16_t Filei{0};
+};
+
+class DbiModuleList {
+ friend DbiModuleSourceFilesIterator;
+
+public:
+ Error initialize(BinaryStreamRef ModInfo, BinaryStreamRef FileInfo);
+
+ Expected<StringRef> getFileName(uint32_t Index) const;
+ uint32_t getModuleCount() const;
+ uint32_t getSourceFileCount() const;
+ uint16_t getSourceFileCount(uint32_t Modi) const;
+
+ iterator_range<DbiModuleSourceFilesIterator>
+ source_files(uint32_t Modi) const;
+
+ DbiModuleDescriptor getModuleDescriptor(uint32_t Modi) const;
+
+private:
+ Error initializeModInfo(BinaryStreamRef ModInfo);
+ Error initializeFileInfo(BinaryStreamRef FileInfo);
+
+ VarStreamArray<DbiModuleDescriptor> Descriptors;
+
+ FixedStreamArray<support::little32_t> FileNameOffsets;
+ FixedStreamArray<support::ulittle16_t> ModFileCountArray;
+
+ // For each module, there are multiple filenames, which can be obtained by
+ // knowing the index of the file. Given the index of the file, one can use
+ // that as an offset into the FileNameOffsets array, which contains the
+ // absolute offset of the file name in NamesBuffer. Thus, for each module
+ // we store the first index in the FileNameOffsets array for this module.
+ // The number of files for the corresponding module is stored in
+ // ModFileCountArray.
+ std::vector<uint32_t> ModuleInitialFileIndex;
+
+ // In order to provide random access into the Descriptors array, we iterate it
+ // once up front to find the offsets of the individual items and store them in
+ // this array.
+ std::vector<uint32_t> ModuleDescriptorOffsets;
+
+ const FileInfoSubstreamHeader *FileInfoHeader = nullptr;
+
+ BinaryStreamRef ModInfoSubstream;
+ BinaryStreamRef FileInfoSubstream;
+ BinaryStreamRef NamesBuffer;
+};
+}
+}
+
+#endif // LLVM_DEBUGINFO_PDB_RAW_DBIMODULELIST_H \ No newline at end of file
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/Native/DbiStream.h b/contrib/llvm/include/llvm/DebugInfo/PDB/Native/DbiStream.h
index 08262e47f77f..8f95481f4152 100644
--- a/contrib/llvm/include/llvm/DebugInfo/PDB/Native/DbiStream.h
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/Native/DbiStream.h
@@ -13,6 +13,7 @@
#include "llvm/DebugInfo/CodeView/ModuleDebugFragment.h"
#include "llvm/DebugInfo/MSF/MappedBlockStream.h"
#include "llvm/DebugInfo/PDB/Native/DbiModuleDescriptor.h"
+#include "llvm/DebugInfo/PDB/Native/DbiModuleList.h"
#include "llvm/DebugInfo/PDB/Native/PDBStringTable.h"
#include "llvm/DebugInfo/PDB/Native/RawConstants.h"
#include "llvm/DebugInfo/PDB/Native/RawTypes.h"
@@ -68,9 +69,7 @@ public:
/// not present, returns InvalidStreamIndex.
uint32_t getDebugStreamIndex(DbgHeaderType Type) const;
- ArrayRef<ModuleInfoEx> modules() const;
-
- Expected<StringRef> getFileNameForIndex(uint32_t Index) const;
+ const DbiModuleList &modules() const;
FixedStreamArray<object::coff_section> getSectionHeaders();
@@ -80,27 +79,22 @@ public:
void visitSectionContributions(ISectionContribVisitor &Visitor) const;
private:
- Error initializeModInfoArray();
Error initializeSectionContributionData();
Error initializeSectionHeadersData();
Error initializeSectionMapData();
- Error initializeFileInfo();
Error initializeFpoRecords();
PDBFile &Pdb;
std::unique_ptr<msf::MappedBlockStream> Stream;
- std::vector<ModuleInfoEx> ModuleInfos;
PDBStringTable ECNames;
- BinaryStreamRef ModInfoSubstream;
BinaryStreamRef SecContrSubstream;
BinaryStreamRef SecMapSubstream;
- BinaryStreamRef FileInfoSubstream;
BinaryStreamRef TypeServerMapSubstream;
BinaryStreamRef ECSubstream;
- BinaryStreamRef NamesBuffer;
+ DbiModuleList Modules;
FixedStreamArray<support::ulittle16_t> DbgStreams;
@@ -108,7 +102,6 @@ private:
FixedStreamArray<SectionContrib> SectionContribs;
FixedStreamArray<SectionContrib2> SectionContribs2;
FixedStreamArray<SecMapEntry> SectionMap;
- FixedStreamArray<support::little32_t> FileNameOffsets;
std::unique_ptr<msf::MappedBlockStream> SectionHeaderStream;
FixedStreamArray<object::coff_section> SectionHeaders;
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeCompilandSymbol.h b/contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeCompilandSymbol.h
index b1d980679a45..22ed61910d94 100644
--- a/contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeCompilandSymbol.h
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeCompilandSymbol.h
@@ -18,7 +18,7 @@ namespace pdb {
class NativeCompilandSymbol : public NativeRawSymbol {
public:
- NativeCompilandSymbol(NativeSession &Session, const ModuleInfoEx &MI);
+ NativeCompilandSymbol(NativeSession &Session, DbiModuleDescriptor MI);
PDB_SymType getSymTag() const override;
bool isEditAndContinueEnabled() const override;
uint32_t getLexicalParentId() const override;
@@ -26,7 +26,7 @@ public:
std::string getName() const override;
private:
- ModuleInfoEx Module;
+ DbiModuleDescriptor Module;
};
} // namespace pdb
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeEnumModules.h b/contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeEnumModules.h
index 18022f599bba..6aa1460dbb4e 100644
--- a/contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeEnumModules.h
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/Native/NativeEnumModules.h
@@ -16,13 +16,13 @@
namespace llvm {
namespace pdb {
+class DbiModuleList;
class NativeSession;
class NativeEnumModules : public IPDBEnumChildren<PDBSymbol> {
public:
- explicit NativeEnumModules(NativeSession &Session,
- ArrayRef<ModuleInfoEx> Modules,
- uint32_t Index = 0);
+ NativeEnumModules(NativeSession &Session, const DbiModuleList &Modules,
+ uint32_t Index = 0);
uint32_t getChildCount() const override;
std::unique_ptr<PDBSymbol> getChildAtIndex(uint32_t Index) const override;
@@ -32,7 +32,7 @@ public:
private:
NativeSession &Session;
- ArrayRef<ModuleInfoEx> Modules;
+ const DbiModuleList &Modules;
uint32_t Index;
};
}
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/Native/RawTypes.h b/contrib/llvm/include/llvm/DebugInfo/PDB/Native/RawTypes.h
index 93622d0a4394..979b8454dd5e 100644
--- a/contrib/llvm/include/llvm/DebugInfo/PDB/Native/RawTypes.h
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/Native/RawTypes.h
@@ -211,7 +211,7 @@ struct ModInfoFlags {
};
/// The header preceeding each entry in the Module Info substream of the DBI
-/// stream.
+/// stream. Corresponds to the type MODI in the reference implementation.
struct ModuleInfoHeader {
/// Currently opened module. This field is a pointer in the reference
/// implementation, but that won't work on 64-bit systems, and anyway it
@@ -243,9 +243,12 @@ struct ModuleInfoHeader {
/// Padding so the next field is 4-byte aligned.
char Padding1[2];
- /// Array of [0..NumFiles) DBI name buffer offsets. This field is a pointer
- /// in the reference implementation, but as with `Mod`, we ignore it for now
- /// since it is unused.
+ /// Array of [0..NumFiles) DBI name buffer offsets. In the reference
+ /// implementation this field is a pointer. But since you can't portably
+ /// serialize a pointer, on 64-bit platforms they copy all the values except
+ /// this one into the 32-bit version of the struct and use that for
+ /// serialization. Regardless, this field is unused, it is only there to
+ /// store a pointer that can be accessed at runtime.
support::ulittle32_t FileNameOffs;
/// Name Index for src file name
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/Native/TpiStream.h b/contrib/llvm/include/llvm/DebugInfo/PDB/Native/TpiStream.h
index 62dde0ef08b7..9fef9bee5e1a 100644
--- a/contrib/llvm/include/llvm/DebugInfo/PDB/Native/TpiStream.h
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/Native/TpiStream.h
@@ -40,12 +40,12 @@ public:
uint32_t TypeIndexBegin() const;
uint32_t TypeIndexEnd() const;
- uint32_t NumTypeRecords() const;
+ uint32_t getNumTypeRecords() const;
uint16_t getTypeHashStreamIndex() const;
uint16_t getTypeHashStreamAuxIndex() const;
uint32_t getHashKeySize() const;
- uint32_t NumHashBuckets() const;
+ uint32_t getNumHashBuckets() const;
FixedStreamArray<support::ulittle32_t> getHashValues() const;
FixedStreamArray<TypeIndexOffset> getTypeIndexOffsets() const;
HashTable &getHashAdjusters();
@@ -55,8 +55,6 @@ public:
Error commit();
private:
- Error verifyHashValues();
-
const PDBFile &Pdb;
std::unique_ptr<msf::MappedBlockStream> Stream;
diff --git a/contrib/llvm/include/llvm/ExecutionEngine/Orc/RPCSerialization.h b/contrib/llvm/include/llvm/ExecutionEngine/Orc/RPCSerialization.h
index a3be242b4457..1cb2448a3a44 100644
--- a/contrib/llvm/include/llvm/ExecutionEngine/Orc/RPCSerialization.h
+++ b/contrib/llvm/include/llvm/ExecutionEngine/Orc/RPCSerialization.h
@@ -355,7 +355,7 @@ public:
std::move(Deserialize)));
KeyName = &I->first;
}
-
+
{
assert(KeyName != nullptr && "No keyname pointer");
std::lock_guard<std::recursive_mutex> Lock(SerializersMutex);
@@ -370,7 +370,7 @@ public:
};
}
}
-
+
static Error serialize(ChannelT &C, Error &&Err) {
std::lock_guard<std::recursive_mutex> Lock(SerializersMutex);
diff --git a/contrib/llvm/include/llvm/ExecutionEngine/RuntimeDyldChecker.h b/contrib/llvm/include/llvm/ExecutionEngine/RuntimeDyldChecker.h
index f5f52b5d2f92..de89f405af4c 100644
--- a/contrib/llvm/include/llvm/ExecutionEngine/RuntimeDyldChecker.h
+++ b/contrib/llvm/include/llvm/ExecutionEngine/RuntimeDyldChecker.h
@@ -10,6 +10,8 @@
#ifndef LLVM_EXECUTIONENGINE_RUNTIMEDYLDCHECKER_H
#define LLVM_EXECUTIONENGINE_RUNTIMEDYLDCHECKER_H
+#include "llvm/ADT/Optional.h"
+
#include <cstdint>
#include <memory>
#include <string>
@@ -97,6 +99,10 @@ public:
StringRef SectionName,
bool LocalAddress);
+ /// \brief If there is a section at the given local address, return its load
+ /// address, otherwise return none.
+ Optional<uint64_t> getSectionLoadAddress(void *LocalAddress) const;
+
private:
std::unique_ptr<RuntimeDyldCheckerImpl> Impl;
};
diff --git a/contrib/llvm/include/llvm/IR/Attributes.h b/contrib/llvm/include/llvm/IR/Attributes.h
index adcb7266073b..cbe681684a5c 100644
--- a/contrib/llvm/include/llvm/IR/Attributes.h
+++ b/contrib/llvm/include/llvm/IR/Attributes.h
@@ -244,7 +244,8 @@ public:
std::pair<unsigned, Optional<unsigned>> getAllocSizeArgs() const;
std::string getAsString(bool InAttrGrp = false) const;
- typedef const Attribute *iterator;
+ using iterator = const Attribute *;
+
iterator begin() const;
iterator end() const;
};
@@ -479,7 +480,7 @@ public:
/// \brief Return the attributes at the index as a string.
std::string getAsString(unsigned Index, bool InAttrGrp = false) const;
- typedef ArrayRef<Attribute>::iterator iterator;
+ using iterator = ArrayRef<Attribute>::iterator;
iterator begin(unsigned Slot) const;
iterator end(unsigned Slot) const;
@@ -662,11 +663,11 @@ public:
bool empty() const { return Attrs.none(); }
// Iterators for target-dependent attributes.
- typedef std::pair<std::string, std::string> td_type;
- typedef std::map<std::string, std::string>::iterator td_iterator;
- typedef std::map<std::string, std::string>::const_iterator td_const_iterator;
- typedef iterator_range<td_iterator> td_range;
- typedef iterator_range<td_const_iterator> td_const_range;
+ using td_type = std::pair<std::string, std::string>;
+ using td_iterator = std::map<std::string, std::string>::iterator;
+ using td_const_iterator = std::map<std::string, std::string>::const_iterator;
+ using td_range = iterator_range<td_iterator>;
+ using td_const_range = iterator_range<td_const_iterator>;
td_iterator td_begin() { return TargetDepAttrs.begin(); }
td_iterator td_end() { return TargetDepAttrs.end(); }
diff --git a/contrib/llvm/include/llvm/IR/BasicBlock.h b/contrib/llvm/include/llvm/IR/BasicBlock.h
index bd210e1abf31..97989cf5c652 100644
--- a/contrib/llvm/include/llvm/IR/BasicBlock.h
+++ b/contrib/llvm/include/llvm/IR/BasicBlock.h
@@ -21,6 +21,7 @@
#include "llvm/IR/SymbolTableListTraits.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/CBindingWrapping.h"
+#include "llvm/Support/Compiler.h"
#include "llvm-c/Types.h"
#include <cassert>
#include <cstddef>
@@ -31,7 +32,9 @@ class CallInst;
class Function;
class LandingPadInst;
class LLVMContext;
+class Module;
class TerminatorInst;
+class ValueSymbolTable;
/// \brief LLVM Basic Block Representation
///
@@ -51,7 +54,7 @@ class TerminatorInst;
class BasicBlock : public Value, // Basic blocks are data objects also
public ilist_node_with_parent<BasicBlock, Function> {
public:
- typedef SymbolTableList<Instruction> InstListType;
+ using InstListType = SymbolTableList<Instruction>;
private:
friend class BlockAddress;
@@ -80,10 +83,10 @@ public:
LLVMContext &getContext() const;
/// Instruction iterators...
- typedef InstListType::iterator iterator;
- typedef InstListType::const_iterator const_iterator;
- typedef InstListType::reverse_iterator reverse_iterator;
- typedef InstListType::const_reverse_iterator const_reverse_iterator;
+ using iterator = InstListType::iterator;
+ using const_iterator = InstListType::const_iterator;
+ using reverse_iterator = InstListType::reverse_iterator;
+ using const_reverse_iterator = InstListType::const_reverse_iterator;
/// \brief Creates a new BasicBlock.
///
diff --git a/contrib/llvm/include/llvm/IR/CFG.h b/contrib/llvm/include/llvm/IR/CFG.h
index 52de11a06baf..e259e42e1ce4 100644
--- a/contrib/llvm/include/llvm/IR/CFG.h
+++ b/contrib/llvm/include/llvm/IR/CFG.h
@@ -37,9 +37,9 @@ namespace llvm {
template <class Ptr, class USE_iterator> // Predecessor Iterator
class PredIterator : public std::iterator<std::forward_iterator_tag,
Ptr, ptrdiff_t, Ptr*, Ptr*> {
- typedef std::iterator<std::forward_iterator_tag, Ptr, ptrdiff_t, Ptr*,
- Ptr*> super;
- typedef PredIterator<Ptr, USE_iterator> Self;
+ using super =
+ std::iterator<std::forward_iterator_tag, Ptr, ptrdiff_t, Ptr*, Ptr*>;
+ using Self = PredIterator<Ptr, USE_iterator>;
USE_iterator It;
inline void advancePastNonTerminators() {
@@ -49,8 +49,8 @@ class PredIterator : public std::iterator<std::forward_iterator_tag,
}
public:
- typedef typename super::pointer pointer;
- typedef typename super::reference reference;
+ using pointer = typename super::pointer;
+ using reference = typename super::reference;
PredIterator() = default;
explicit inline PredIterator(Ptr *bb) : It(bb->user_begin()) {
@@ -90,11 +90,11 @@ public:
}
};
-typedef PredIterator<BasicBlock, Value::user_iterator> pred_iterator;
-typedef PredIterator<const BasicBlock,
- Value::const_user_iterator> const_pred_iterator;
-typedef iterator_range<pred_iterator> pred_range;
-typedef iterator_range<const_pred_iterator> pred_const_range;
+using pred_iterator = PredIterator<BasicBlock, Value::user_iterator>;
+using const_pred_iterator =
+ PredIterator<const BasicBlock, Value::const_user_iterator>;
+using pred_range = iterator_range<pred_iterator>;
+using pred_const_range = iterator_range<const_pred_iterator>;
inline pred_iterator pred_begin(BasicBlock *BB) { return pred_iterator(BB); }
inline const_pred_iterator pred_begin(const BasicBlock *BB) {
@@ -118,12 +118,12 @@ inline pred_const_range predecessors(const BasicBlock *BB) {
// BasicBlock succ_iterator helpers
//===----------------------------------------------------------------------===//
-typedef TerminatorInst::SuccIterator<TerminatorInst *, BasicBlock>
- succ_iterator;
-typedef TerminatorInst::SuccIterator<const TerminatorInst *, const BasicBlock>
- succ_const_iterator;
-typedef iterator_range<succ_iterator> succ_range;
-typedef iterator_range<succ_const_iterator> succ_const_range;
+using succ_iterator =
+ TerminatorInst::SuccIterator<TerminatorInst *, BasicBlock>;
+using succ_const_iterator =
+ TerminatorInst::SuccIterator<const TerminatorInst *, const BasicBlock>;
+using succ_range = iterator_range<succ_iterator>;
+using succ_const_range = iterator_range<succ_const_iterator>;
inline succ_iterator succ_begin(BasicBlock *BB) {
return succ_iterator(BB->getTerminator());
@@ -160,8 +160,8 @@ struct isPodLike<TerminatorInst::SuccIterator<T, U>> {
// graph of basic blocks...
template <> struct GraphTraits<BasicBlock*> {
- typedef BasicBlock *NodeRef;
- typedef succ_iterator ChildIteratorType;
+ using NodeRef = BasicBlock *;
+ using ChildIteratorType = succ_iterator;
static NodeRef getEntryNode(BasicBlock *BB) { return BB; }
static ChildIteratorType child_begin(NodeRef N) { return succ_begin(N); }
@@ -169,8 +169,8 @@ template <> struct GraphTraits<BasicBlock*> {
};
template <> struct GraphTraits<const BasicBlock*> {
- typedef const BasicBlock *NodeRef;
- typedef succ_const_iterator ChildIteratorType;
+ using NodeRef = const BasicBlock *;
+ using ChildIteratorType = succ_const_iterator;
static NodeRef getEntryNode(const BasicBlock *BB) { return BB; }
@@ -184,16 +184,18 @@ template <> struct GraphTraits<const BasicBlock*> {
// instead of the successor edges.
//
template <> struct GraphTraits<Inverse<BasicBlock*>> {
- typedef BasicBlock *NodeRef;
- typedef pred_iterator ChildIteratorType;
+ using NodeRef = BasicBlock *;
+ using ChildIteratorType = pred_iterator;
+
static NodeRef getEntryNode(Inverse<BasicBlock *> G) { return G.Graph; }
static ChildIteratorType child_begin(NodeRef N) { return pred_begin(N); }
static ChildIteratorType child_end(NodeRef N) { return pred_end(N); }
};
template <> struct GraphTraits<Inverse<const BasicBlock*>> {
- typedef const BasicBlock *NodeRef;
- typedef const_pred_iterator ChildIteratorType;
+ using NodeRef = const BasicBlock *;
+ using ChildIteratorType = const_pred_iterator;
+
static NodeRef getEntryNode(Inverse<const BasicBlock *> G) { return G.Graph; }
static ChildIteratorType child_begin(NodeRef N) { return pred_begin(N); }
static ChildIteratorType child_end(NodeRef N) { return pred_end(N); }
@@ -211,7 +213,7 @@ template <> struct GraphTraits<Function*> : public GraphTraits<BasicBlock*> {
static NodeRef getEntryNode(Function *F) { return &F->getEntryBlock(); }
// nodes_iterator/begin/end - Allow iteration over all nodes in the graph
- typedef pointer_iterator<Function::iterator> nodes_iterator;
+ using nodes_iterator = pointer_iterator<Function::iterator>;
static nodes_iterator nodes_begin(Function *F) {
return nodes_iterator(F->begin());
@@ -228,7 +230,7 @@ template <> struct GraphTraits<const Function*> :
static NodeRef getEntryNode(const Function *F) { return &F->getEntryBlock(); }
// nodes_iterator/begin/end - Allow iteration over all nodes in the graph
- typedef pointer_iterator<Function::const_iterator> nodes_iterator;
+ using nodes_iterator = pointer_iterator<Function::const_iterator>;
static nodes_iterator nodes_begin(const Function *F) {
return nodes_iterator(F->begin());
diff --git a/contrib/llvm/include/llvm/IR/CallSite.h b/contrib/llvm/include/llvm/IR/CallSite.h
index d61431a51a97..4a806ab501e5 100644
--- a/contrib/llvm/include/llvm/IR/CallSite.h
+++ b/contrib/llvm/include/llvm/IR/CallSite.h
@@ -207,7 +207,7 @@ public:
/// The type of iterator to use when looping over actual arguments at this
/// call site.
- typedef IterTy arg_iterator;
+ using arg_iterator = IterTy;
iterator_range<IterTy> args() const {
return make_range(arg_begin(), arg_end());
@@ -231,7 +231,7 @@ public:
/// Type of iterator to use when looping over data operands at this call site
/// (see below).
- typedef IterTy data_operand_iterator;
+ using data_operand_iterator = IterTy;
/// data_operands_begin/data_operands_end - Return iterators iterating over
/// the call / invoke argument list and bundle operands. For invokes, this is
diff --git a/contrib/llvm/include/llvm/IR/CallingConv.h b/contrib/llvm/include/llvm/IR/CallingConv.h
index 604e99c8b52c..39fb3f1c791b 100644
--- a/contrib/llvm/include/llvm/IR/CallingConv.h
+++ b/contrib/llvm/include/llvm/IR/CallingConv.h
@@ -1,4 +1,4 @@
-//===-- llvm/CallingConv.h - LLVM Calling Conventions -----------*- C++ -*-===//
+//===- llvm/CallingConv.h - LLVM Calling Conventions ------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -20,8 +20,9 @@ namespace llvm {
/// the well-known calling conventions.
///
namespace CallingConv {
+
/// LLVM IR allows to use arbitrary numbers as calling convention identifiers.
- typedef unsigned ID;
+ using ID = unsigned;
/// A set of enums which specify the assigned numeric values for known llvm
/// calling conventions.
@@ -203,8 +204,9 @@ namespace CallingConv {
/// The highest possible calling convention ID. Must be some 2^k - 1.
MaxID = 1023
};
-} // End CallingConv namespace
-} // End llvm namespace
+} // end namespace CallingConv
+
+} // end namespace llvm
-#endif
+#endif // LLVM_IR_CALLINGCONV_H
diff --git a/contrib/llvm/include/llvm/IR/ConstantRange.h b/contrib/llvm/include/llvm/IR/ConstantRange.h
index fd7f96abb19e..6a50a8801f86 100644
--- a/contrib/llvm/include/llvm/IR/ConstantRange.h
+++ b/contrib/llvm/include/llvm/IR/ConstantRange.h
@@ -41,7 +41,7 @@ namespace llvm {
class MDNode;
/// This class represents a range of values.
-class ConstantRange {
+class LLVM_NODISCARD ConstantRange {
APInt Lower, Upper;
public:
@@ -167,7 +167,10 @@ public:
APInt getSetSize() const;
/// Compare set size of this range with the range CR.
- bool isSizeStrictlySmallerThanOf(const ConstantRange &CR) const;
+ bool isSizeStrictlySmallerThan(const ConstantRange &CR) const;
+
+ // Compare set size of this range with Value.
+ bool isSizeLargerThan(uint64_t MaxSize) const;
/// Return the largest unsigned value contained in the ConstantRange.
APInt getUnsignedMax() const;
diff --git a/contrib/llvm/include/llvm/IR/DataLayout.h b/contrib/llvm/include/llvm/IR/DataLayout.h
index 1930d48577d4..c1d398f17b59 100644
--- a/contrib/llvm/include/llvm/IR/DataLayout.h
+++ b/contrib/llvm/include/llvm/IR/DataLayout.h
@@ -1,4 +1,4 @@
-//===--------- llvm/DataLayout.h - Data size & alignment info ---*- C++ -*-===//
+//===- llvm/DataLayout.h - Data size & alignment info -----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -20,27 +20,32 @@
#ifndef LLVM_IR_DATALAYOUT_H
#define LLVM_IR_DATALAYOUT_H
+#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Type.h"
#include "llvm/Pass.h"
-#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MathExtras.h"
+#include <cassert>
+#include <cstdint>
+#include <string>
// This needs to be outside of the namespace, to avoid conflict with llvm-c
// decl.
-typedef struct LLVMOpaqueTargetData *LLVMTargetDataRef;
+using LLVMTargetDataRef = struct LLVMOpaqueTargetData *;
namespace llvm {
-class Value;
-class StructType;
-class StructLayout;
-class Triple;
class GlobalVariable;
class LLVMContext;
-template<typename T>
-class ArrayRef;
+class Module;
+class StructLayout;
+class Triple;
+class Value;
/// Enum used to categorize the alignment types stored by LayoutAlignElem
enum AlignTypeEnum {
@@ -72,6 +77,7 @@ struct LayoutAlignElem {
static LayoutAlignElem get(AlignTypeEnum align_type, unsigned abi_align,
unsigned pref_align, uint32_t bit_width);
+
bool operator==(const LayoutAlignElem &rhs) const;
};
@@ -90,6 +96,7 @@ struct PointerAlignElem {
/// Initializer
static PointerAlignElem get(uint32_t AddressSpace, unsigned ABIAlign,
unsigned PrefAlign, uint32_t TypeByteWidth);
+
bool operator==(const PointerAlignElem &rhs) const;
};
@@ -121,7 +128,7 @@ private:
/// \brief Primitive type alignment data. This is sorted by type and bit
/// width during construction.
- typedef SmallVector<LayoutAlignElem, 16> AlignmentsTy;
+ using AlignmentsTy = SmallVector<LayoutAlignElem, 16>;
AlignmentsTy Alignments;
AlignmentsTy::const_iterator
@@ -136,7 +143,7 @@ private:
/// \brief The string representation used to create this DataLayout
std::string StringRepresentation;
- typedef SmallVector<PointerAlignElem, 8> PointersTy;
+ using PointersTy = SmallVector<PointerAlignElem, 8>;
PointersTy Pointers;
PointersTy::const_iterator
@@ -147,7 +154,7 @@ private:
PointersTy::iterator findPointerLowerBound(uint32_t AddressSpace);
// The StructType -> StructLayout map.
- mutable void *LayoutMap;
+ mutable void *LayoutMap = nullptr;
/// Pointers in these address spaces are non-integral, and don't have a
/// well-defined bitwise representation.
@@ -172,16 +179,16 @@ private:
public:
/// Constructs a DataLayout from a specification string. See reset().
- explicit DataLayout(StringRef LayoutDescription) : LayoutMap(nullptr) {
+ explicit DataLayout(StringRef LayoutDescription) {
reset(LayoutDescription);
}
/// Initialize target data from properties stored in the module.
explicit DataLayout(const Module *M);
- void init(const Module *M);
+ DataLayout(const DataLayout &DL) { *this = DL; }
- DataLayout(const DataLayout &DL) : LayoutMap(nullptr) { *this = DL; }
+ ~DataLayout(); // Not virtual, do not subclass this class
DataLayout &operator=(const DataLayout &DL) {
clear();
@@ -200,7 +207,7 @@ public:
bool operator==(const DataLayout &Other) const;
bool operator!=(const DataLayout &Other) const { return !(*this == Other); }
- ~DataLayout(); // Not virtual, do not subclass this class
+ void init(const Module *M);
/// Parse a data layout string (with fallback to default values).
void reset(StringRef LayoutDescription);
@@ -489,6 +496,7 @@ class StructLayout {
unsigned IsPadded : 1;
unsigned NumElements : 31;
uint64_t MemberOffsets[1]; // variable sized array!
+
public:
uint64_t getSizeInBytes() const { return StructSize; }
@@ -515,6 +523,7 @@ public:
private:
friend class DataLayout; // Only DataLayout can create this class
+
StructLayout(StructType *ST, const DataLayout &DL);
};
@@ -560,6 +569,6 @@ inline uint64_t DataLayout::getTypeSizeInBits(Type *Ty) const {
}
}
-} // End llvm namespace
+} // end namespace llvm
-#endif
+#endif // LLVM_IR_DATALAYOUT_H
diff --git a/contrib/llvm/include/llvm/IR/DebugInfo.h b/contrib/llvm/include/llvm/IR/DebugInfo.h
index 04f46197b1c3..1d8e7e2855fd 100644
--- a/contrib/llvm/include/llvm/IR/DebugInfo.h
+++ b/contrib/llvm/include/llvm/IR/DebugInfo.h
@@ -21,17 +21,12 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/IR/DebugInfoMetadata.h"
-#include "llvm/Support/Casting.h"
-#include "llvm/Support/Dwarf.h"
-#include "llvm/Support/ErrorHandling.h"
-#include <iterator>
namespace llvm {
-class Module;
+
class DbgDeclareInst;
class DbgValueInst;
-template <typename K, typename V, typename KeyInfoT, typename BucketT>
-class DenseMap;
+class Module;
/// \brief Find subprogram that is enclosing this scope.
DISubprogram *getDISubprogram(const MDNode *Scope);
@@ -95,13 +90,13 @@ private:
bool addScope(DIScope *Scope);
public:
- typedef SmallVectorImpl<DICompileUnit *>::const_iterator
- compile_unit_iterator;
- typedef SmallVectorImpl<DISubprogram *>::const_iterator subprogram_iterator;
- typedef SmallVectorImpl<DIGlobalVariableExpression *>::const_iterator
- global_variable_expression_iterator;
- typedef SmallVectorImpl<DIType *>::const_iterator type_iterator;
- typedef SmallVectorImpl<DIScope *>::const_iterator scope_iterator;
+ using compile_unit_iterator =
+ SmallVectorImpl<DICompileUnit *>::const_iterator;
+ using subprogram_iterator = SmallVectorImpl<DISubprogram *>::const_iterator;
+ using global_variable_expression_iterator =
+ SmallVectorImpl<DIGlobalVariableExpression *>::const_iterator;
+ using type_iterator = SmallVectorImpl<DIType *>::const_iterator;
+ using scope_iterator = SmallVectorImpl<DIScope *>::const_iterator;
iterator_range<compile_unit_iterator> compile_units() const {
return make_range(CUs.begin(), CUs.end());
@@ -140,4 +135,4 @@ private:
} // end namespace llvm
-#endif
+#endif // LLVM_IR_DEBUGINFO_H
diff --git a/contrib/llvm/include/llvm/IR/Dominators.h b/contrib/llvm/include/llvm/IR/Dominators.h
index 8f6c85f53efc..def91e73eb1d 100644
--- a/contrib/llvm/include/llvm/IR/Dominators.h
+++ b/contrib/llvm/include/llvm/IR/Dominators.h
@@ -42,7 +42,7 @@ extern template void Calculate<Function, Inverse<BasicBlock *>>(
DominatorTreeBaseByGraphTraits<GraphTraits<Inverse<BasicBlock *>>> &DT,
Function &F);
-typedef DomTreeNodeBase<BasicBlock> DomTreeNode;
+using DomTreeNode = DomTreeNodeBase<BasicBlock>;
class BasicBlockEdge {
const BasicBlock *Start;
@@ -70,7 +70,7 @@ public:
};
template <> struct DenseMapInfo<BasicBlockEdge> {
- typedef DenseMapInfo<const BasicBlock *> BBInfo;
+ using BBInfo = DenseMapInfo<const BasicBlock *>;
static unsigned getHashValue(const BasicBlockEdge *V);
@@ -113,7 +113,7 @@ template <> struct DenseMapInfo<BasicBlockEdge> {
/// preceding statements; this is stated only to assist human understanding.
class DominatorTree : public DominatorTreeBase<BasicBlock> {
public:
- typedef DominatorTreeBase<BasicBlock> Base;
+ using Base = DominatorTreeBase<BasicBlock>;
DominatorTree() : DominatorTreeBase<BasicBlock>(false) {}
explicit DominatorTree(Function &F) : DominatorTreeBase<BasicBlock>(false) {
@@ -168,9 +168,9 @@ public:
// iterable by generic graph iterators.
template <class Node, class ChildIterator> struct DomTreeGraphTraitsBase {
- typedef Node *NodeRef;
- typedef ChildIterator ChildIteratorType;
- typedef df_iterator<Node *, df_iterator_default_set<Node*>> nodes_iterator;
+ using NodeRef = Node *;
+ using ChildIteratorType = ChildIterator;
+ using nodes_iterator = df_iterator<Node *, df_iterator_default_set<Node*>>;
static NodeRef getEntryNode(NodeRef N) { return N; }
static ChildIteratorType child_begin(NodeRef N) { return N->begin(); }
@@ -212,7 +212,7 @@ class DominatorTreeAnalysis : public AnalysisInfoMixin<DominatorTreeAnalysis> {
public:
/// \brief Provide the result typedef for this analysis pass.
- typedef DominatorTree Result;
+ using Result = DominatorTree;
/// \brief Run the analysis pass over a function and produce a dominator tree.
DominatorTree run(Function &F, FunctionAnalysisManager &);
diff --git a/contrib/llvm/include/llvm/IR/Function.h b/contrib/llvm/include/llvm/IR/Function.h
index f9582f51ca8d..c12a125b6352 100644
--- a/contrib/llvm/include/llvm/IR/Function.h
+++ b/contrib/llvm/include/llvm/IR/Function.h
@@ -466,7 +466,6 @@ public:
/// @brief Determine if the parameter or return value is marked with NoAlias
/// attribute.
- /// @param n The parameter to check. 1 is the first parameter, 0 is the return
bool returnDoesNotAlias() const {
return AttributeSets.hasAttribute(AttributeList::ReturnIndex,
Attribute::NoAlias);
diff --git a/contrib/llvm/include/llvm/IR/InlineAsm.h b/contrib/llvm/include/llvm/IR/InlineAsm.h
index 5d2f72d211ff..a57e7d63012b 100644
--- a/contrib/llvm/include/llvm/IR/InlineAsm.h
+++ b/contrib/llvm/include/llvm/IR/InlineAsm.h
@@ -95,7 +95,7 @@ public:
isClobber // '~x'
};
- typedef std::vector<std::string> ConstraintCodeVector;
+ using ConstraintCodeVector = std::vector<std::string>;
struct SubConstraintInfo {
/// MatchingInput - If this is not -1, this is an output constraint where an
@@ -112,9 +112,9 @@ public:
SubConstraintInfo() = default;
};
- typedef std::vector<SubConstraintInfo> SubConstraintInfoVector;
+ using SubConstraintInfoVector = std::vector<SubConstraintInfo>;
struct ConstraintInfo;
- typedef std::vector<ConstraintInfo> ConstraintInfoVector;
+ using ConstraintInfoVector = std::vector<ConstraintInfo>;
struct ConstraintInfo {
/// Type - The basic type of the constraint: input/output/clobber
diff --git a/contrib/llvm/include/llvm/IR/InstIterator.h b/contrib/llvm/include/llvm/IR/InstIterator.h
index 28fc473f1490..2988fc935dd5 100644
--- a/contrib/llvm/include/llvm/IR/InstIterator.h
+++ b/contrib/llvm/include/llvm/IR/InstIterator.h
@@ -31,20 +31,20 @@ namespace llvm {
// inst_iterator and const_inst_iterator's.
//
template <class BB_t, class BB_i_t, class BI_t, class II_t> class InstIterator {
- typedef BB_t BBty;
- typedef BB_i_t BBIty;
- typedef BI_t BIty;
- typedef II_t IIty;
+ using BBty = BB_t;
+ using BBIty = BB_i_t;
+ using BIty = BI_t;
+ using IIty = II_t;
BB_t *BBs; // BasicBlocksType
BB_i_t BB; // BasicBlocksType::iterator
BI_t BI; // BasicBlock::iterator
public:
- typedef std::bidirectional_iterator_tag iterator_category;
- typedef IIty value_type;
- typedef signed difference_type;
- typedef IIty* pointer;
- typedef IIty& reference;
+ using iterator_category = std::bidirectional_iterator_tag;
+ using value_type = IIty;
+ using difference_type = signed;
+ using pointer = IIty *;
+ using reference = IIty &;
// Default constructor
InstIterator() = default;
@@ -119,13 +119,15 @@ private:
}
};
-typedef InstIterator<SymbolTableList<BasicBlock>, Function::iterator,
- BasicBlock::iterator, Instruction> inst_iterator;
-typedef InstIterator<const SymbolTableList<BasicBlock>,
- Function::const_iterator, BasicBlock::const_iterator,
- const Instruction> const_inst_iterator;
-typedef iterator_range<inst_iterator> inst_range;
-typedef iterator_range<const_inst_iterator> const_inst_range;
+using inst_iterator =
+ InstIterator<SymbolTableList<BasicBlock>, Function::iterator,
+ BasicBlock::iterator, Instruction>;
+using const_inst_iterator =
+ InstIterator<const SymbolTableList<BasicBlock>,
+ Function::const_iterator, BasicBlock::const_iterator,
+ const Instruction>;
+using inst_range = iterator_range<inst_iterator>;
+using const_inst_range = iterator_range<const_inst_iterator>;
inline inst_iterator inst_begin(Function *F) { return inst_iterator(*F); }
inline inst_iterator inst_end(Function *F) { return inst_iterator(*F, true); }
diff --git a/contrib/llvm/include/llvm/IR/InstrTypes.h b/contrib/llvm/include/llvm/IR/InstrTypes.h
index 6795b029cce9..d16a5d318d78 100644
--- a/contrib/llvm/include/llvm/IR/InstrTypes.h
+++ b/contrib/llvm/include/llvm/IR/InstrTypes.h
@@ -1,4 +1,4 @@
-//===-- llvm/InstrTypes.h - Important Instruction subclasses ----*- C++ -*-===//
+//===- llvm/InstrTypes.h - Important Instruction subclasses -----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -29,7 +29,9 @@
#include "llvm/IR/Instruction.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/OperandTraits.h"
+#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
+#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>
@@ -114,17 +116,17 @@ public:
template <class Term, class BB> // Successor Iterator
class SuccIterator : public std::iterator<std::random_access_iterator_tag, BB,
int, BB *, BB *> {
- typedef std::iterator<std::random_access_iterator_tag, BB, int, BB *, BB *>
- super;
+ using super =
+ std::iterator<std::random_access_iterator_tag, BB, int, BB *, BB *>;
public:
- typedef typename super::pointer pointer;
- typedef typename super::reference reference;
+ using pointer = typename super::pointer;
+ using reference = typename super::reference;
private:
Term TermInst;
unsigned idx;
- typedef SuccIterator<Term, BB> Self;
+ using Self = SuccIterator<Term, BB>;
inline bool index_is_valid(unsigned idx) {
return idx < TermInst->getNumSuccessors();
@@ -260,11 +262,11 @@ public:
}
};
- typedef SuccIterator<TerminatorInst *, BasicBlock> succ_iterator;
- typedef SuccIterator<const TerminatorInst *, const BasicBlock>
- succ_const_iterator;
- typedef iterator_range<succ_iterator> succ_range;
- typedef iterator_range<succ_const_iterator> succ_const_range;
+ using succ_iterator = SuccIterator<TerminatorInst *, BasicBlock>;
+ using succ_const_iterator =
+ SuccIterator<const TerminatorInst *, const BasicBlock>;
+ using succ_range = iterator_range<succ_iterator>;
+ using succ_const_range = iterator_range<succ_const_iterator>;
private:
inline succ_iterator succ_begin() { return succ_iterator(this); }
@@ -341,14 +343,16 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(UnaryInstruction, Value)
class BinaryOperator : public Instruction {
protected:
- void init(BinaryOps iType);
BinaryOperator(BinaryOps iType, Value *S1, Value *S2, Type *Ty,
const Twine &Name, Instruction *InsertBefore);
BinaryOperator(BinaryOps iType, Value *S1, Value *S2, Type *Ty,
const Twine &Name, BasicBlock *InsertAtEnd);
+ void init(BinaryOps iType);
+
// Note: Instruction needs to be a friend here to call cloneImpl.
friend class Instruction;
+
BinaryOperator *cloneImpl() const;
public:
@@ -1125,8 +1129,6 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CmpInst, Value)
//===----------------------------------------------------------------------===//
class FuncletPadInst : public Instruction {
private:
- void init(Value *ParentPad, ArrayRef<Value *> Args, const Twine &NameStr);
-
FuncletPadInst(const FuncletPadInst &CPI);
explicit FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
@@ -1136,11 +1138,14 @@ private:
ArrayRef<Value *> Args, unsigned Values,
const Twine &NameStr, BasicBlock *InsertAtEnd);
+ void init(Value *ParentPad, ArrayRef<Value *> Args, const Twine &NameStr);
+
protected:
// Note: Instruction needs to be a friend here to call cloneImpl.
friend class Instruction;
friend class CatchPadInst;
friend class CleanupPadInst;
+
FuncletPadInst *cloneImpl() const;
public:
@@ -1261,7 +1266,8 @@ public:
ArrayRef<InputTy> inputs() const { return Inputs; }
- typedef typename std::vector<InputTy>::const_iterator input_iterator;
+ using input_iterator = typename std::vector<InputTy>::const_iterator;
+
size_t input_size() const { return Inputs.size(); }
input_iterator input_begin() const { return Inputs.begin(); }
input_iterator input_end() const { return Inputs.end(); }
@@ -1269,8 +1275,8 @@ public:
StringRef getTag() const { return Tag; }
};
-typedef OperandBundleDefT<Value *> OperandBundleDef;
-typedef OperandBundleDefT<const Value *> ConstOperandBundleDef;
+using OperandBundleDef = OperandBundleDefT<Value *>;
+using ConstOperandBundleDef = OperandBundleDefT<const Value *>;
/// \brief A mixin to add operand bundle functionality to llvm instruction
/// classes.
@@ -1553,8 +1559,8 @@ protected:
return OperandBundleUse(BOI.Tag, Inputs);
}
- typedef BundleOpInfo *bundle_op_iterator;
- typedef const BundleOpInfo *const_bundle_op_iterator;
+ using bundle_op_iterator = BundleOpInfo *;
+ using const_bundle_op_iterator = const BundleOpInfo *;
/// \brief Return the start of the list of BundleOpInfo instances associated
/// with this OperandBundleUser.
@@ -1654,6 +1660,6 @@ protected:
}
};
-} // end llvm namespace
+} // end namespace llvm
#endif // LLVM_IR_INSTRTYPES_H
diff --git a/contrib/llvm/include/llvm/IR/Intrinsics.td b/contrib/llvm/include/llvm/IR/Intrinsics.td
index cf7e5d8758a9..7b78d4d3d34a 100644
--- a/contrib/llvm/include/llvm/IR/Intrinsics.td
+++ b/contrib/llvm/include/llvm/IR/Intrinsics.td
@@ -795,6 +795,14 @@ def int_type_checked_load : Intrinsic<[llvm_ptr_ty, llvm_i1_ty],
def int_load_relative: Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty, llvm_anyint_ty],
[IntrReadMem, IntrArgMemOnly]>;
+// Xray intrinsics
+//===----------------------------------------------------------------------===//
+// Custom event logging for x-ray.
+// Takes a pointer to a string and the length of the string.
+def int_xray_customevent : Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty],
+ [NoCapture<0>, ReadOnly<0>, IntrWriteMem]>;
+//===----------------------------------------------------------------------===//
+
//===------ Memory intrinsics with element-wise atomicity guarantees ------===//
//
diff --git a/contrib/llvm/include/llvm/IR/IntrinsicsARM.td b/contrib/llvm/include/llvm/IR/IntrinsicsARM.td
index 18ed24be56d4..fe3861301689 100644
--- a/contrib/llvm/include/llvm/IR/IntrinsicsARM.td
+++ b/contrib/llvm/include/llvm/IR/IntrinsicsARM.td
@@ -22,12 +22,26 @@ let TargetPrefix = "arm" in { // All intrinsics start with "llvm.arm.".
// and return value are essentially chains, used to force ordering during ISel.
def int_arm_space : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], []>;
+// 16-bit multiplications
+def int_arm_smulbb : GCCBuiltin<"__builtin_arm_smulbb">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_smulbt : GCCBuiltin<"__builtin_arm_smulbt">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_smultb : GCCBuiltin<"__builtin_arm_smultb">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_smultt : GCCBuiltin<"__builtin_arm_smultt">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_smulwb : GCCBuiltin<"__builtin_arm_smulwb">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_smulwt : GCCBuiltin<"__builtin_arm_smulwt">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+
//===----------------------------------------------------------------------===//
// Saturating Arithmetic
def int_arm_qadd : GCCBuiltin<"__builtin_arm_qadd">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
- [IntrNoMem, Commutative]>;
+ [Commutative, IntrNoMem]>;
def int_arm_qsub : GCCBuiltin<"__builtin_arm_qsub">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
def int_arm_ssat : GCCBuiltin<"__builtin_arm_ssat">,
@@ -35,6 +49,176 @@ def int_arm_ssat : GCCBuiltin<"__builtin_arm_ssat">,
def int_arm_usat : GCCBuiltin<"__builtin_arm_usat">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+// Accumulating multiplications
+def int_arm_smlabb : GCCBuiltin<"__builtin_arm_smlabb">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+def int_arm_smlabt : GCCBuiltin<"__builtin_arm_smlabt">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+def int_arm_smlatb : GCCBuiltin<"__builtin_arm_smlatb">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+def int_arm_smlatt : GCCBuiltin<"__builtin_arm_smlatt">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+def int_arm_smlawb : GCCBuiltin<"__builtin_arm_smlawb">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+def int_arm_smlawt : GCCBuiltin<"__builtin_arm_smlawt">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+
+// Parallel 16-bit saturation
+def int_arm_ssat16 : GCCBuiltin<"__builtin_arm_ssat16">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_usat16 : GCCBuiltin<"__builtin_arm_usat16">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+
+// Packing and unpacking
+def int_arm_sxtab16 : GCCBuiltin<"__builtin_arm_sxtab16">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_sxtb16 : GCCBuiltin<"__builtin_arm_sxtb16">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem]>;
+def int_arm_uxtab16 : GCCBuiltin<"__builtin_arm_uxtab16">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_uxtb16 : GCCBuiltin<"__builtin_arm_uxtb16">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem]>;
+
+// Parallel selection, reads the GE flags.
+def int_arm_sel : GCCBuiltin<"__builtin_arm_sel">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrReadMem]>;
+
+// Parallel 8-bit addition and subtraction
+def int_arm_qadd8 : GCCBuiltin<"__builtin_arm_qadd8">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_qsub8 : GCCBuiltin<"__builtin_arm_qsub8">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+// Writes to the GE bits.
+def int_arm_sadd8 : GCCBuiltin<"__builtin_arm_sadd8">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], []>;
+def int_arm_shadd8 : GCCBuiltin<"__builtin_arm_shadd8">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_shsub8 : GCCBuiltin<"__builtin_arm_shsub8">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+// Writes to the GE bits.
+def int_arm_ssub8 : GCCBuiltin<"__builtin_arm_ssub8">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], []>;
+// Writes to the GE bits.
+def int_arm_uadd8 : GCCBuiltin<"__builtin_arm_uadd8">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], []>;
+def int_arm_uhadd8 : GCCBuiltin<"__builtin_arm_uhadd8">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_uhsub8 : GCCBuiltin<"__builtin_arm_uhsub8">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_uqadd8 : GCCBuiltin<"__builtin_arm_uqadd8">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_uqsub8 : GCCBuiltin<"__builtin_arm_uqsub8">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+// Writes to the GE bits.
+def int_arm_usub8 : GCCBuiltin<"__builtin_arm_usub8">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], []>;
+
+// Sum of 8-bit absolute differences
+def int_arm_usad8 : GCCBuiltin<"__builtin_arm_usad8">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_usada8 : GCCBuiltin<"__builtin_arm_usada8">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+
+// Parallel 16-bit addition and subtraction
+def int_arm_qadd16 : GCCBuiltin<"__builtin_arm_qadd16">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_qasx : GCCBuiltin<"__builtin_arm_qasx">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_qsax : GCCBuiltin<"__builtin_arm_qsax">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_qsub16 : GCCBuiltin<"__builtin_arm_qsub16">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+// Writes to the GE bits.
+def int_arm_sadd16 : GCCBuiltin<"__builtin_arm_sadd16">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], []>;
+// Writes to the GE bits.
+def int_arm_sasx : GCCBuiltin<"__builtin_arm_sasx">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], []>;
+def int_arm_shadd16 : GCCBuiltin<"__builtin_arm_shadd16">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_shasx : GCCBuiltin<"__builtin_arm_shasx">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_shsax : GCCBuiltin<"__builtin_arm_shsax">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_shsub16 : GCCBuiltin<"__builtin_arm_shsub16">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+// Writes to the GE bits.
+def int_arm_ssax : GCCBuiltin<"__builtin_arm_ssax">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], []>;
+// Writes to the GE bits.
+def int_arm_ssub16 : GCCBuiltin<"__builtin_arm_ssub16">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], []>;
+// Writes to the GE bits.
+def int_arm_uadd16 : GCCBuiltin<"__builtin_arm_uadd16">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], []>;
+// Writes to the GE bits.
+def int_arm_uasx : GCCBuiltin<"__builtin_arm_uasx">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], []>;
+def int_arm_uhadd16 : GCCBuiltin<"__builtin_arm_uhadd16">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_uhasx : GCCBuiltin<"__builtin_arm_uhasx">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_uhsax : GCCBuiltin<"__builtin_arm_uhsax">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_uhsub16 : GCCBuiltin<"__builtin_arm_uhsub16">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_uqadd16 : GCCBuiltin<"__builtin_arm_uqadd16">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_uqasx : GCCBuiltin<"__builtin_arm_uqasx">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_uqsax : GCCBuiltin<"__builtin_arm_uqsax">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_uqsub16 : GCCBuiltin<"__builtin_arm_uqsub16">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+// Writes to the GE bits.
+def int_arm_usax : GCCBuiltin<"__builtin_arm_usax">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], []>;
+// Writes to the GE bits.
+def int_arm_usub16 : GCCBuiltin<"__builtin_arm_usub16">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], []>;
+
+// Parallel 16-bit multiplication
+def int_arm_smlad : GCCBuiltin<"__builtin_arm_smlad">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+def int_arm_smladx : GCCBuiltin<"__builtin_arm_smladx">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+def int_arm_smlald : GCCBuiltin<"__builtin_arm_smlald">,
+ Intrinsic<[llvm_i64_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i64_ty],
+ [IntrNoMem]>;
+def int_arm_smlaldx : GCCBuiltin<"__builtin_arm_smlaldx">,
+ Intrinsic<[llvm_i64_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i64_ty],
+ [IntrNoMem]>;
+def int_arm_smlsd : GCCBuiltin<"__builtin_arm_smlsd">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+def int_arm_smlsdx : GCCBuiltin<"__builtin_arm_smlsdx">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+def int_arm_smlsld : GCCBuiltin<"__builtin_arm_smlsld">,
+ Intrinsic<[llvm_i64_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i64_ty],
+ [IntrNoMem]>;
+def int_arm_smlsldx : GCCBuiltin<"__builtin_arm_smlsldx">,
+ Intrinsic<[llvm_i64_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i64_ty],
+ [IntrNoMem]>;
+def int_arm_smuad : GCCBuiltin<"__builtin_arm_smuad">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_smuadx : GCCBuiltin<"__builtin_arm_smuadx">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_smusd : GCCBuiltin<"__builtin_arm_smusd">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_smusdx : GCCBuiltin<"__builtin_arm_smusdx">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+
+
//===----------------------------------------------------------------------===//
// Load, Store and Clear exclusive
diff --git a/contrib/llvm/include/llvm/IR/ModuleSummaryIndex.h b/contrib/llvm/include/llvm/IR/ModuleSummaryIndex.h
index a7274fbfbced..53570bdf16f4 100644
--- a/contrib/llvm/include/llvm/IR/ModuleSummaryIndex.h
+++ b/contrib/llvm/include/llvm/IR/ModuleSummaryIndex.h
@@ -45,58 +45,54 @@ struct CalleeInfo {
}
};
-/// Struct to hold value either by GUID or GlobalValue*. Values in combined
-/// indexes as well as indirect calls are GUIDs, all others are GlobalValues.
-struct ValueInfo {
- /// The value representation used in this instance.
- enum ValueInfoKind {
- VI_GUID,
- VI_Value,
- };
+class GlobalValueSummary;
- /// Union of the two possible value types.
- union ValueUnion {
- GlobalValue::GUID Id;
- const GlobalValue *GV;
- ValueUnion(GlobalValue::GUID Id) : Id(Id) {}
- ValueUnion(const GlobalValue *GV) : GV(GV) {}
- };
+typedef std::vector<std::unique_ptr<GlobalValueSummary>> GlobalValueSummaryList;
- /// The value being represented.
- ValueUnion TheValue;
- /// The value representation.
- ValueInfoKind Kind;
- /// Constructor for a GUID value
- ValueInfo(GlobalValue::GUID Id = 0) : TheValue(Id), Kind(VI_GUID) {}
- /// Constructor for a GlobalValue* value
- ValueInfo(const GlobalValue *V) : TheValue(V), Kind(VI_Value) {}
- /// Accessor for GUID value
- GlobalValue::GUID getGUID() const {
- assert(Kind == VI_GUID && "Not a GUID type");
- return TheValue.Id;
- }
- /// Accessor for GlobalValue* value
- const GlobalValue *getValue() const {
- assert(Kind == VI_Value && "Not a Value type");
- return TheValue.GV;
- }
- bool isGUID() const { return Kind == VI_GUID; }
+struct GlobalValueSummaryInfo {
+ /// The GlobalValue corresponding to this summary. This is only used in
+ /// per-module summaries.
+ const GlobalValue *GV = nullptr;
+
+ /// List of global value summary structures for a particular value held
+ /// in the GlobalValueMap. Requires a vector in the case of multiple
+ /// COMDAT values of the same name.
+ GlobalValueSummaryList SummaryList;
};
-template <> struct DenseMapInfo<ValueInfo> {
- static inline ValueInfo getEmptyKey() { return ValueInfo((GlobalValue *)-1); }
- static inline ValueInfo getTombstoneKey() {
- return ValueInfo((GlobalValue *)-2);
+/// Map from global value GUID to corresponding summary structures. Use a
+/// std::map rather than a DenseMap so that pointers to the map's value_type
+/// (which are used by ValueInfo) are not invalidated by insertion. Also it will
+/// likely incur less overhead, as the value type is not very small and the size
+/// of the map is unknown, resulting in inefficiencies due to repeated
+/// insertions and resizing.
+typedef std::map<GlobalValue::GUID, GlobalValueSummaryInfo>
+ GlobalValueSummaryMapTy;
+
+/// Struct that holds a reference to a particular GUID in a global value
+/// summary.
+struct ValueInfo {
+ const GlobalValueSummaryMapTy::value_type *Ref = nullptr;
+ ValueInfo() = default;
+ ValueInfo(const GlobalValueSummaryMapTy::value_type *Ref) : Ref(Ref) {}
+ operator bool() const { return Ref; }
+
+ GlobalValue::GUID getGUID() const { return Ref->first; }
+ const GlobalValue *getValue() const { return Ref->second.GV; }
+ ArrayRef<std::unique_ptr<GlobalValueSummary>> getSummaryList() const {
+ return Ref->second.SummaryList;
}
- static bool isEqual(ValueInfo L, ValueInfo R) {
- if (L.isGUID() != R.isGUID())
- return false;
- return L.isGUID() ? (L.getGUID() == R.getGUID())
- : (L.getValue() == R.getValue());
+};
+
+template <> struct DenseMapInfo<ValueInfo> {
+ static inline ValueInfo getEmptyKey() {
+ return ValueInfo((GlobalValueSummaryMapTy::value_type *)-1);
}
- static unsigned getHashValue(ValueInfo I) {
- return I.isGUID() ? I.getGUID() : (uintptr_t)I.getValue();
+ static inline ValueInfo getTombstoneKey() {
+ return ValueInfo((GlobalValueSummaryMapTy::value_type *)-2);
}
+ static bool isEqual(ValueInfo L, ValueInfo R) { return L.Ref == R.Ref; }
+ static unsigned getHashValue(ValueInfo I) { return (uintptr_t)I.Ref; }
};
/// \brief Function and variable summary information to aid decisions and
@@ -483,19 +479,6 @@ struct TypeIdSummary {
/// 160 bits SHA1
typedef std::array<uint32_t, 5> ModuleHash;
-/// List of global value summary structures for a particular value held
-/// in the GlobalValueMap. Requires a vector in the case of multiple
-/// COMDAT values of the same name.
-typedef std::vector<std::unique_ptr<GlobalValueSummary>> GlobalValueSummaryList;
-
-/// Map from global value GUID to corresponding summary structures.
-/// Use a std::map rather than a DenseMap since it will likely incur
-/// less overhead, as the value type is not very small and the size
-/// of the map is unknown, resulting in inefficiencies due to repeated
-/// insertions and resizing.
-typedef std::map<GlobalValue::GUID, GlobalValueSummaryList>
- GlobalValueSummaryMapTy;
-
/// Type used for iterating through the global value summary map.
typedef GlobalValueSummaryMapTy::const_iterator const_gvsummary_iterator;
typedef GlobalValueSummaryMapTy::iterator gvsummary_iterator;
@@ -532,6 +515,11 @@ private:
// YAML I/O support.
friend yaml::MappingTraits<ModuleSummaryIndex>;
+ GlobalValueSummaryMapTy::value_type *
+ getOrInsertValuePtr(GlobalValue::GUID GUID) {
+ return &*GlobalValueMap.emplace(GUID, GlobalValueSummaryInfo{}).first;
+ }
+
public:
gvsummary_iterator begin() { return GlobalValueMap.begin(); }
const_gvsummary_iterator begin() const { return GlobalValueMap.begin(); }
@@ -539,21 +527,22 @@ public:
const_gvsummary_iterator end() const { return GlobalValueMap.end(); }
size_t size() const { return GlobalValueMap.size(); }
- /// Get the list of global value summary objects for a given value name.
- const GlobalValueSummaryList &getGlobalValueSummaryList(StringRef ValueName) {
- return GlobalValueMap[GlobalValue::getGUID(ValueName)];
+ /// Return a ValueInfo for GUID if it exists, otherwise return ValueInfo().
+ ValueInfo getValueInfo(GlobalValue::GUID GUID) const {
+ auto I = GlobalValueMap.find(GUID);
+ return ValueInfo(I == GlobalValueMap.end() ? nullptr : &*I);
}
- /// Get the list of global value summary objects for a given value name.
- const const_gvsummary_iterator
- findGlobalValueSummaryList(StringRef ValueName) const {
- return GlobalValueMap.find(GlobalValue::getGUID(ValueName));
+ /// Return a ValueInfo for \p GUID.
+ ValueInfo getOrInsertValueInfo(GlobalValue::GUID GUID) {
+ return ValueInfo(getOrInsertValuePtr(GUID));
}
- /// Get the list of global value summary objects for a given value GUID.
- const const_gvsummary_iterator
- findGlobalValueSummaryList(GlobalValue::GUID ValueGUID) const {
- return GlobalValueMap.find(ValueGUID);
+ /// Return a ValueInfo for \p GV and mark it as belonging to GV.
+ ValueInfo getOrInsertValueInfo(const GlobalValue *GV) {
+ auto VP = getOrInsertValuePtr(GV->getGUID());
+ VP->second.GV = GV;
+ return ValueInfo(VP);
}
/// Return the GUID for \p OriginalId in the OidGuidMap.
@@ -565,17 +554,18 @@ public:
/// Add a global value summary for a value of the given name.
void addGlobalValueSummary(StringRef ValueName,
std::unique_ptr<GlobalValueSummary> Summary) {
- addOriginalName(GlobalValue::getGUID(ValueName),
- Summary->getOriginalName());
- GlobalValueMap[GlobalValue::getGUID(ValueName)].push_back(
- std::move(Summary));
+ addGlobalValueSummary(getOrInsertValueInfo(GlobalValue::getGUID(ValueName)),
+ std::move(Summary));
}
- /// Add a global value summary for a value of the given GUID.
- void addGlobalValueSummary(GlobalValue::GUID ValueGUID,
+ /// Add a global value summary for the given ValueInfo.
+ void addGlobalValueSummary(ValueInfo VI,
std::unique_ptr<GlobalValueSummary> Summary) {
- addOriginalName(ValueGUID, Summary->getOriginalName());
- GlobalValueMap[ValueGUID].push_back(std::move(Summary));
+ addOriginalName(VI.getGUID(), Summary->getOriginalName());
+ // Here we have a notionally const VI, but the value it points to is owned
+ // by the non-const *this.
+ const_cast<GlobalValueSummaryMapTy::value_type *>(VI.Ref)
+ ->second.SummaryList.push_back(std::move(Summary));
}
/// Add an original name for the value of the given GUID.
@@ -593,16 +583,16 @@ public:
/// not found.
GlobalValueSummary *findSummaryInModule(GlobalValue::GUID ValueGUID,
StringRef ModuleId) const {
- auto CalleeInfoList = findGlobalValueSummaryList(ValueGUID);
- if (CalleeInfoList == end()) {
+ auto CalleeInfo = getValueInfo(ValueGUID);
+ if (!CalleeInfo) {
return nullptr; // This function does not have a summary
}
auto Summary =
- llvm::find_if(CalleeInfoList->second,
+ llvm::find_if(CalleeInfo.getSummaryList(),
[&](const std::unique_ptr<GlobalValueSummary> &Summary) {
return Summary->modulePath() == ModuleId;
});
- if (Summary == CalleeInfoList->second.end())
+ if (Summary == CalleeInfo.getSummaryList().end())
return nullptr;
return Summary->get();
}
diff --git a/contrib/llvm/include/llvm/IR/ModuleSummaryIndexYAML.h b/contrib/llvm/include/llvm/IR/ModuleSummaryIndexYAML.h
index 80719c696935..78fdb602027d 100644
--- a/contrib/llvm/include/llvm/IR/ModuleSummaryIndexYAML.h
+++ b/contrib/llvm/include/llvm/IR/ModuleSummaryIndexYAML.h
@@ -201,7 +201,7 @@ template <> struct CustomMappingTraits<GlobalValueSummaryMapTy> {
for (auto &FSum : FSums) {
GlobalValueSummary::GVFlags GVFlags(GlobalValue::ExternalLinkage, false,
false);
- Elem.push_back(llvm::make_unique<FunctionSummary>(
+ Elem.SummaryList.push_back(llvm::make_unique<FunctionSummary>(
GVFlags, 0, ArrayRef<ValueInfo>{},
ArrayRef<FunctionSummary::EdgeTy>{}, std::move(FSum.TypeTests),
std::move(FSum.TypeTestAssumeVCalls),
@@ -213,7 +213,7 @@ template <> struct CustomMappingTraits<GlobalValueSummaryMapTy> {
static void output(IO &io, GlobalValueSummaryMapTy &V) {
for (auto &P : V) {
std::vector<FunctionSummaryYaml> FSums;
- for (auto &Sum : P.second) {
+ for (auto &Sum : P.second.SummaryList) {
if (auto *FSum = dyn_cast<FunctionSummary>(Sum.get()))
FSums.push_back(FunctionSummaryYaml{
FSum->type_tests(), FSum->type_test_assume_vcalls(),
diff --git a/contrib/llvm/include/llvm/MC/ConstantPools.h b/contrib/llvm/include/llvm/MC/ConstantPools.h
index c34211c2bd12..5d4e32a672dd 100644
--- a/contrib/llvm/include/llvm/MC/ConstantPools.h
+++ b/contrib/llvm/include/llvm/MC/ConstantPools.h
@@ -63,6 +63,8 @@ public:
// Return true if the constant pool is empty
bool empty();
+
+ void clearCache();
};
class AssemblerConstantPools {
@@ -86,6 +88,7 @@ class AssemblerConstantPools {
public:
void emitAll(MCStreamer &Streamer);
void emitForCurrentSection(MCStreamer &Streamer);
+ void clearCacheForCurrentSection(MCStreamer &Streamer);
const MCExpr *addEntry(MCStreamer &Streamer, const MCExpr *Expr,
unsigned Size, SMLoc Loc);
diff --git a/contrib/llvm/include/llvm/Object/COFF.h b/contrib/llvm/include/llvm/Object/COFF.h
index 1b6aaf4be666..8b9b49737170 100644
--- a/contrib/llvm/include/llvm/Object/COFF.h
+++ b/contrib/llvm/include/llvm/Object/COFF.h
@@ -20,7 +20,9 @@
#include "llvm/Object/Binary.h"
#include "llvm/Object/Error.h"
#include "llvm/Object/ObjectFile.h"
+#include "llvm/Support/BinaryByteStream.h"
#include "llvm/Support/COFF.h"
+#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/ErrorOr.h"
@@ -40,6 +42,7 @@ class DelayImportDirectoryEntryRef;
class ExportDirectoryEntryRef;
class ImportDirectoryEntryRef;
class ImportedSymbolRef;
+class ResourceSectionRef;
using import_directory_iterator = content_iterator<ImportDirectoryEntryRef>;
using delay_import_directory_iterator =
@@ -623,6 +626,26 @@ struct coff_base_reloc_block_entry {
int getOffset() const { return Data & ((1 << 12) - 1); }
};
+struct coff_resource_dir_entry {
+ union {
+ support::ulittle32_t NameOffset;
+ support::ulittle32_t ID;
+ uint32_t getNameOffset() const {
+ return maskTrailingOnes<uint32_t>(31) & NameOffset;
+ }
+ } Identifier;
+ union {
+ support::ulittle32_t DataEntryOffset;
+ support::ulittle32_t SubdirOffset;
+
+ bool isSubDir() const { return SubdirOffset >> 31; }
+ uint32_t value() const {
+ return maskTrailingOnes<uint32_t>(31) & SubdirOffset;
+ }
+
+ } Offset;
+};
+
struct coff_resource_dir_table {
support::ulittle32_t Characteristics;
support::ulittle32_t TimeDateStamp;
@@ -1047,6 +1070,23 @@ private:
const COFFObjectFile *OwningObject = nullptr;
};
+class ResourceSectionRef {
+public:
+ ResourceSectionRef() = default;
+ explicit ResourceSectionRef(StringRef Ref) : BBS(Ref, support::little) {}
+
+ ErrorOr<ArrayRef<UTF16>> getEntryNameString(const coff_resource_dir_entry &Entry);
+ ErrorOr<const coff_resource_dir_table &>
+ getEntrySubDir(const coff_resource_dir_entry &Entry);
+ ErrorOr<const coff_resource_dir_table &> getBaseTable();
+
+private:
+ BinaryByteStream BBS;
+
+ ErrorOr<const coff_resource_dir_table &> getTableAtOffset(uint32_t Offset);
+ ErrorOr<ArrayRef<UTF16>> getDirStringAtOffset(uint32_t Offset);
+};
+
// Corresponds to `_FPO_DATA` structure in the PE/COFF spec.
struct FpoData {
support::ulittle32_t Offset; // ulOffStart: Offset 1st byte of function code
diff --git a/contrib/llvm/include/llvm/Object/Wasm.h b/contrib/llvm/include/llvm/Object/Wasm.h
index 6b6bbe252f65..4bc39d98b7af 100644
--- a/contrib/llvm/include/llvm/Object/Wasm.h
+++ b/contrib/llvm/include/llvm/Object/Wasm.h
@@ -41,10 +41,14 @@ public:
DEBUG_FUNCTION_NAME,
};
- WasmSymbol(StringRef Name, SymbolType Type) : Name(Name), Type(Type) {}
+ WasmSymbol(StringRef Name, SymbolType Type, uint32_t Section,
+ uint32_t ElementIndex)
+ : Name(Name), Type(Type), Section(Section), ElementIndex(ElementIndex) {}
StringRef Name;
SymbolType Type;
+ uint32_t Section;
+ uint32_t ElementIndex;
};
class WasmSection {
diff --git a/contrib/llvm/include/llvm/ObjectYAML/WasmYAML.h b/contrib/llvm/include/llvm/ObjectYAML/WasmYAML.h
index dfeeb8589f82..bd7d72be4dbc 100644
--- a/contrib/llvm/include/llvm/ObjectYAML/WasmYAML.h
+++ b/contrib/llvm/include/llvm/ObjectYAML/WasmYAML.h
@@ -97,6 +97,11 @@ struct DataSegment {
yaml::BinaryRef Content;
};
+struct NameEntry {
+ uint32_t Index;
+ StringRef Name;
+};
+
struct Signature {
Signature() : Form(wasm::WASM_TYPE_FUNC) {}
@@ -122,6 +127,11 @@ struct CustomSection : Section {
StringRef Name;
yaml::BinaryRef Payload;
+
+ // The follow is used by the "name" custom section.
+ // TODO(sbc): Add support for more then just functions names. The wasm
+ // name section can support multiple sub-sections.
+ std::vector<NameEntry> FunctionNames;
};
struct TypeSection : Section {
@@ -244,6 +254,7 @@ LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::WasmYAML::Global)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::WasmYAML::Function)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::WasmYAML::LocalDecl)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::WasmYAML::Relocation)
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::WasmYAML::NameEntry)
LLVM_YAML_IS_FLOW_SEQUENCE_VECTOR(uint32_t)
namespace llvm {
@@ -297,6 +308,10 @@ template <> struct MappingTraits<WasmYAML::Relocation> {
static void mapping(IO &IO, WasmYAML::Relocation &Relocation);
};
+template <> struct MappingTraits<WasmYAML::NameEntry> {
+ static void mapping(IO &IO, WasmYAML::NameEntry &NameEntry);
+};
+
template <> struct MappingTraits<WasmYAML::LocalDecl> {
static void mapping(IO &IO, WasmYAML::LocalDecl &LocalDecl);
};
diff --git a/contrib/llvm/include/llvm/Support/AArch64TargetParser.def b/contrib/llvm/include/llvm/Support/AArch64TargetParser.def
index 1700deadeaef..8eccebcd932a 100644
--- a/contrib/llvm/include/llvm/Support/AArch64TargetParser.def
+++ b/contrib/llvm/include/llvm/Support/AArch64TargetParser.def
@@ -20,8 +20,7 @@ AARCH64_ARCH("invalid", AK_INVALID, nullptr, nullptr,
ARMBuildAttrs::CPUArch::v8_A, FK_NONE, AArch64::AEK_NONE)
AARCH64_ARCH("armv8-a", AK_ARMV8A, "8-A", "v8", ARMBuildAttrs::CPUArch::v8_A,
FK_CRYPTO_NEON_FP_ARMV8,
- (AArch64::AEK_CRC | AArch64::AEK_CRYPTO | AArch64::AEK_FP |
- AArch64::AEK_SIMD))
+ (AArch64::AEK_CRYPTO | AArch64::AEK_FP | AArch64::AEK_SIMD))
AARCH64_ARCH("armv8.1-a", AK_ARMV8_1A, "8.1-A", "v8.1a",
ARMBuildAttrs::CPUArch::v8_A, FK_CRYPTO_NEON_FP_ARMV8,
(AArch64::AEK_CRC | AArch64::AEK_CRYPTO | AArch64::AEK_FP |
@@ -52,38 +51,37 @@ AARCH64_ARCH_EXT_NAME("ras", AArch64::AEK_RAS, "+ras", "-ras")
#define AARCH64_CPU_NAME(NAME, ID, DEFAULT_FPU, IS_DEFAULT, DEFAULT_EXT)
#endif
AARCH64_CPU_NAME("cortex-a35", AK_ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
- (AArch64::AEK_SIMD | AArch64::AEK_CRC | AArch64::AEK_CRYPTO))
+ (AArch64::AEK_CRC))
AARCH64_CPU_NAME("cortex-a53", AK_ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, true,
- ( AArch64::AEK_SIMD | AArch64::AEK_CRC | AArch64::AEK_CRYPTO))
+ (AArch64::AEK_CRC))
AARCH64_CPU_NAME("cortex-a57", AK_ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
- (AArch64::AEK_SIMD | AArch64::AEK_CRC | AArch64::AEK_CRYPTO))
+ (AArch64::AEK_CRC))
AARCH64_CPU_NAME("cortex-a72", AK_ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
- (AArch64::AEK_SIMD | AArch64::AEK_CRC | AArch64::AEK_CRYPTO))
+ (AArch64::AEK_CRC))
AARCH64_CPU_NAME("cortex-a73", AK_ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
- (AArch64::AEK_SIMD | AArch64::AEK_CRC | AArch64::AEK_CRYPTO))
+ (AArch64::AEK_CRC))
AARCH64_CPU_NAME("cyclone", AK_ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
- (AArch64::AEK_SIMD | AArch64::AEK_CRYPTO))
+ (AArch64::AEK_NONE))
AARCH64_CPU_NAME("exynos-m1", AK_ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
- (AArch64::AEK_SIMD | AArch64::AEK_CRC | AArch64::AEK_CRYPTO))
+ (AArch64::AEK_CRC))
AARCH64_CPU_NAME("exynos-m2", AK_ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
- (AArch64::AEK_SIMD | AArch64::AEK_CRC | AArch64::AEK_CRYPTO))
+ (AArch64::AEK_CRC))
AARCH64_CPU_NAME("exynos-m3", AK_ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
- (AArch64::AEK_SIMD | AArch64::AEK_CRC | AArch64::AEK_CRYPTO))
+ (AArch64::AEK_CRC))
AARCH64_CPU_NAME("falkor", AK_ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
- (AArch64::AEK_SIMD | AArch64::AEK_CRC | AArch64::AEK_CRYPTO))
+ (AArch64::AEK_CRC))
AARCH64_CPU_NAME("kryo", AK_ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
- (AArch64::AEK_SIMD | AArch64::AEK_CRC | AArch64::AEK_CRYPTO))
+ (AArch64::AEK_CRC))
AARCH64_CPU_NAME("thunderx2t99", AK_ARMV8_1A, FK_CRYPTO_NEON_FP_ARMV8, false,
- (AArch64::AEK_SIMD | AArch64::AEK_LSE | AArch64::AEK_CRC |
- AArch64::AEK_CRYPTO))
+ (AArch64::AEK_NONE))
AARCH64_CPU_NAME("thunderx", AK_ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
- (AArch64::AEK_SIMD | AArch64::AEK_CRC | AArch64::AEK_CRYPTO | AArch64::AEK_FP | AArch64::AEK_PROFILE))
+ (AArch64::AEK_CRC | AArch64::AEK_PROFILE))
AARCH64_CPU_NAME("thunderxt88", AK_ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
- (AArch64::AEK_SIMD | AArch64::AEK_CRC | AArch64::AEK_CRYPTO | AArch64::AEK_FP | AArch64::AEK_PROFILE))
+ (AArch64::AEK_CRC | AArch64::AEK_PROFILE))
AARCH64_CPU_NAME("thunderxt81", AK_ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
- (AArch64::AEK_SIMD | AArch64::AEK_CRC | AArch64::AEK_CRYPTO | AArch64::AEK_FP | AArch64::AEK_PROFILE))
+ (AArch64::AEK_CRC | AArch64::AEK_PROFILE))
AARCH64_CPU_NAME("thunderxt83", AK_ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
- (AArch64::AEK_SIMD | AArch64::AEK_CRC | AArch64::AEK_CRYPTO | AArch64::AEK_FP | AArch64::AEK_PROFILE))
+ (AArch64::AEK_CRC | AArch64::AEK_PROFILE))
// Invalid CPU
AARCH64_CPU_NAME("invalid", AK_INVALID, FK_INVALID, true, AArch64::AEK_INVALID)
#undef AARCH64_CPU_NAME
diff --git a/contrib/llvm/include/llvm/Support/BinaryStreamArray.h b/contrib/llvm/include/llvm/Support/BinaryStreamArray.h
index f141c30f16c7..bad31cd38d6a 100644
--- a/contrib/llvm/include/llvm/Support/BinaryStreamArray.h
+++ b/contrib/llvm/include/llvm/Support/BinaryStreamArray.h
@@ -64,8 +64,10 @@ class VarStreamArrayIterator
public:
VarStreamArrayIterator() = default;
VarStreamArrayIterator(const ArrayType &Array, const WrappedCtx &Ctx,
- BinaryStreamRef Stream, bool *HadError = nullptr)
- : IterRef(Stream), Ctx(&Ctx), Array(&Array), HadError(HadError) {
+ BinaryStreamRef Stream, bool *HadError = nullptr,
+ uint32_t Offset = 0)
+ : IterRef(Stream), Ctx(&Ctx), Array(&Array), AbsOffset(Offset),
+ HadError(HadError) {
if (IterRef.getLength() == 0)
moveToEnd();
else {
@@ -115,6 +117,7 @@ public:
for (unsigned I = 0; I < N; ++I) {
// We are done with the current record, discard it so that we are
// positioned at the next record.
+ AbsOffset += ThisLen;
IterRef = IterRef.drop_front(ThisLen);
if (IterRef.getLength() == 0) {
// There is nothing after the current record, we must make this an end
@@ -135,6 +138,8 @@ public:
return *this;
}
+ uint32_t offset() const { return AbsOffset; }
+
private:
void moveToEnd() {
Array = nullptr;
@@ -152,6 +157,7 @@ private:
const WrappedCtx *Ctx{nullptr};
const ArrayType *Array{nullptr};
uint32_t ThisLen{0};
+ uint32_t AbsOffset{0};
bool HasError{false};
bool *HadError{nullptr};
};
@@ -234,7 +240,7 @@ public:
/// since the behavior is undefined if \p Offset does not refer to the
/// beginning of a valid record.
Iterator at(uint32_t Offset) const {
- return Iterator(*this, Ctx, Stream.drop_front(Offset), nullptr);
+ return Iterator(*this, Ctx, Stream.drop_front(Offset), nullptr, Offset);
}
BinaryStreamRef getUnderlyingStream() const { return Stream; }
@@ -338,7 +344,7 @@ private:
template <typename T>
class FixedStreamArrayIterator
: public iterator_facade_base<FixedStreamArrayIterator<T>,
- std::random_access_iterator_tag, T> {
+ std::random_access_iterator_tag, const T> {
public:
FixedStreamArrayIterator(const FixedStreamArray<T> &Array, uint32_t Index)
@@ -352,6 +358,7 @@ public:
}
const T &operator*() const { return Array[Index]; }
+ const T &operator*() { return Array[Index]; }
bool operator==(const FixedStreamArrayIterator<T> &R) const {
assert(Array == R.Array);
diff --git a/contrib/llvm/include/llvm/Support/COFF.h b/contrib/llvm/include/llvm/Support/COFF.h
index 19223306bd07..bc2098e2b5cf 100644
--- a/contrib/llvm/include/llvm/Support/COFF.h
+++ b/contrib/llvm/include/llvm/Support/COFF.h
@@ -152,6 +152,30 @@ namespace COFF {
IMAGE_FILE_BYTES_REVERSED_HI = 0x8000
};
+ enum ResourceTypeID {
+ RID_Cursor = 1,
+ RID_Bitmap = 2,
+ RID_Icon = 3,
+ RID_Menu = 4,
+ RID_Dialog = 5,
+ RID_String = 6,
+ RID_FontDir = 7,
+ RID_Font = 8,
+ RID_Accelerator = 9,
+ RID_RCData = 10,
+ RID_MessageTable = 11,
+ RID_Group_Cursor = 12,
+ RID_Group_Icon = 14,
+ RID_Version = 16,
+ RID_DLGInclude = 17,
+ RID_PlugPlay = 19,
+ RID_VXD = 20,
+ RID_AniCursor = 21,
+ RID_AniIcon = 22,
+ RID_HTML = 23,
+ RID_Manifest = 24,
+ };
+
struct symbol {
char Name[NameSize];
uint32_t Value;
@@ -349,6 +373,26 @@ namespace COFF {
IMAGE_REL_ARM_BLX23T = 0x0015
};
+ enum RelocationTypesARM64 {
+ IMAGE_REL_ARM64_ABSOLUTE = 0x0000,
+ IMAGE_REL_ARM64_ADDR32 = 0x0001,
+ IMAGE_REL_ARM64_ADDR32NB = 0x0002,
+ IMAGE_REL_ARM64_BRANCH26 = 0x0003,
+ IMAGE_REL_ARM64_PAGEBASE_REL2 = 0x0004,
+ IMAGE_REL_ARM64_REL21 = 0x0005,
+ IMAGE_REL_ARM64_PAGEOFFSET_12A = 0x0006,
+ IMAGE_REL_ARM64_PAGEOFFSET_12L = 0x0007,
+ IMAGE_REL_ARM64_SECREL = 0x0008,
+ IMAGE_REL_ARM64_SECREL_LOW12A = 0x0009,
+ IMAGE_REL_ARM64_SECREL_HIGH12A = 0x000A,
+ IMAGE_REL_ARM64_SECREL_LOW12L = 0x000B,
+ IMAGE_REL_ARM64_TOKEN = 0x000C,
+ IMAGE_REL_ARM64_SECTION = 0x000D,
+ IMAGE_REL_ARM64_ADDR64 = 0x000E,
+ IMAGE_REL_ARM64_BRANCH19 = 0x000F,
+ IMAGE_REL_ARM64_BRANCH14 = 0x0010,
+ };
+
enum COMDATType {
IMAGE_COMDAT_SELECT_NODUPLICATES = 1,
IMAGE_COMDAT_SELECT_ANY,
diff --git a/contrib/llvm/include/llvm/Support/KnownBits.h b/contrib/llvm/include/llvm/Support/KnownBits.h
index 292ea9e4b717..3d38cf878538 100644
--- a/contrib/llvm/include/llvm/Support/KnownBits.h
+++ b/contrib/llvm/include/llvm/Support/KnownBits.h
@@ -24,6 +24,12 @@ struct KnownBits {
APInt Zero;
APInt One;
+private:
+ // Internal constructor for creating a ConstantRange from two APInts.
+ KnownBits(APInt Zero, APInt One)
+ : Zero(std::move(Zero)), One(std::move(One)) {}
+
+public:
// Default construct Zero and One.
KnownBits() {}
@@ -37,6 +43,55 @@ struct KnownBits {
return Zero.getBitWidth();
}
+ /// Returns true if there is conflicting information.
+ bool hasConflict() const { return Zero.intersects(One); }
+
+ /// Returns true if we know the value of all bits.
+ bool isConstant() const {
+ assert(!hasConflict() && "KnownBits conflict!");
+ return Zero.countPopulation() + One.countPopulation() == getBitWidth();
+ }
+
+ /// Returns the value when all bits have a known value. This just returns One
+ /// with a protective assertion.
+ const APInt &getConstant() const {
+ assert(isConstant() && "Can only get value when all bits are known");
+ return One;
+ }
+
+ /// Returns true if we don't know any bits.
+ bool isUnknown() const { return Zero.isNullValue() && One.isNullValue(); }
+
+ /// Resets the known state of all bits.
+ void resetAll() {
+ Zero.clearAllBits();
+ One.clearAllBits();
+ }
+
+ /// Returns true if value is all zero.
+ bool isZero() const {
+ assert(!hasConflict() && "KnownBits conflict!");
+ return Zero.isAllOnesValue();
+ }
+
+ /// Returns true if value is all one bits.
+ bool isAllOnes() const {
+ assert(!hasConflict() && "KnownBits conflict!");
+ return One.isAllOnesValue();
+ }
+
+ /// Make all bits known to be zero and discard any previous information.
+ void setAllZero() {
+ Zero.setAllBits();
+ One.clearAllBits();
+ }
+
+ /// Make all bits known to be one and discard any previous information.
+ void setAllOnes() {
+ Zero.clearAllBits();
+ One.setAllBits();
+ }
+
/// Returns true if this value is known to be negative.
bool isNegative() const { return One.isSignBitSet(); }
@@ -54,6 +109,30 @@ struct KnownBits {
assert(!isNegative() && "Can't make a negative value non-negative");
Zero.setSignBit();
}
+
+ /// Truncate the underlying known Zero and One bits. This is equivalent
+ /// to truncating the value we're tracking.
+ KnownBits trunc(unsigned BitWidth) {
+ return KnownBits(Zero.trunc(BitWidth), One.trunc(BitWidth));
+ }
+
+ /// Zero extends the underlying known Zero and One bits. This is equivalent
+ /// to zero extending the value we're tracking.
+ KnownBits zext(unsigned BitWidth) {
+ return KnownBits(Zero.zext(BitWidth), One.zext(BitWidth));
+ }
+
+ /// Sign extends the underlying known Zero and One bits. This is equivalent
+ /// to sign extending the value we're tracking.
+ KnownBits sext(unsigned BitWidth) {
+ return KnownBits(Zero.sext(BitWidth), One.sext(BitWidth));
+ }
+
+ /// Zero extends or truncates the underlying known Zero and One bits. This is
+ /// equivalent to zero extending or truncating the value we're tracking.
+ KnownBits zextOrTrunc(unsigned BitWidth) {
+ return KnownBits(Zero.zextOrTrunc(BitWidth), One.zextOrTrunc(BitWidth));
+ }
};
} // end namespace llvm
diff --git a/contrib/llvm/include/llvm/Support/MathExtras.h b/contrib/llvm/include/llvm/Support/MathExtras.h
index 994456f9a681..7f07e8cc3a51 100644
--- a/contrib/llvm/include/llvm/Support/MathExtras.h
+++ b/contrib/llvm/include/llvm/Support/MathExtras.h
@@ -214,6 +214,18 @@ template <typename T> T maskLeadingOnes(unsigned N) {
return ~maskTrailingOnes<T>(CHAR_BIT * sizeof(T) - N);
}
+/// \brief Create a bitmask with the N right-most bits set to 0, and all other
+/// bits set to 1. Only unsigned types are allowed.
+template <typename T> T maskTrailingZeros(unsigned N) {
+ return maskLeadingOnes<T>(CHAR_BIT * sizeof(T) - N);
+}
+
+/// \brief Create a bitmask with the N left-most bits set to 0, and all other
+/// bits set to 1. Only unsigned types are allowed.
+template <typename T> T maskLeadingZeros(unsigned N) {
+ return maskTrailingOnes<T>(CHAR_BIT * sizeof(T) - N);
+}
+
/// \brief Get the index of the last set bit starting from the least
/// significant bit.
///
diff --git a/contrib/llvm/include/llvm/Target/GlobalISel/SelectionDAGCompat.td b/contrib/llvm/include/llvm/Target/GlobalISel/SelectionDAGCompat.td
index 9f034220815f..a06c67fe814c 100644
--- a/contrib/llvm/include/llvm/Target/GlobalISel/SelectionDAGCompat.td
+++ b/contrib/llvm/include/llvm/Target/GlobalISel/SelectionDAGCompat.td
@@ -25,25 +25,43 @@ class GINodeEquiv<Instruction i, SDNode node> {
SDNode Node = node;
}
-def : GINodeEquiv<G_ZEXT, zext>;
+// These are defined in the same order as the G_* instructions.
+def : GINodeEquiv<G_ANYEXT, anyext>;
def : GINodeEquiv<G_SEXT, sext>;
+def : GINodeEquiv<G_ZEXT, zext>;
+def : GINodeEquiv<G_TRUNC, trunc>;
+def : GINodeEquiv<G_BITCAST, bitconvert>;
+// G_INTTOPTR - SelectionDAG has no equivalent.
+// G_PTRTOINT - SelectionDAG has no equivalent.
+// G_CONSTANT - Not needed since constants aren't operators.
+// G_FCONSTANT - Not needed since constants aren't operators.
def : GINodeEquiv<G_ADD, add>;
def : GINodeEquiv<G_SUB, sub>;
def : GINodeEquiv<G_MUL, mul>;
-
+def : GINodeEquiv<G_SDIV, sdiv>;
+def : GINodeEquiv<G_UDIV, udiv>;
+def : GINodeEquiv<G_SREM, srem>;
+def : GINodeEquiv<G_UREM, urem>;
+def : GINodeEquiv<G_AND, and>;
def : GINodeEquiv<G_OR, or>;
def : GINodeEquiv<G_XOR, xor>;
-def : GINodeEquiv<G_AND, and>;
-
def : GINodeEquiv<G_SHL, shl>;
def : GINodeEquiv<G_LSHR, srl>;
def : GINodeEquiv<G_ASHR, sra>;
-
-def : GINodeEquiv<G_SDIV, sdiv>;
-def : GINodeEquiv<G_UDIV, udiv>;
-def : GINodeEquiv<G_SREM, srem>;
-def : GINodeEquiv<G_UREM, urem>;
-
+def : GINodeEquiv<G_SELECT, select>;
+def : GINodeEquiv<G_FNEG, fneg>;
+def : GINodeEquiv<G_FPEXT, fpextend>;
+def : GINodeEquiv<G_FPTRUNC, ftrunc>;
+def : GINodeEquiv<G_FPTOSI, fp_to_sint>;
+def : GINodeEquiv<G_FPTOUI, fp_to_uint>;
+def : GINodeEquiv<G_SITOFP, sint_to_fp>;
+def : GINodeEquiv<G_UITOFP, uint_to_fp>;
+def : GINodeEquiv<G_FADD, fadd>;
+def : GINodeEquiv<G_FSUB, fsub>;
+def : GINodeEquiv<G_FMUL, fmul>;
+def : GINodeEquiv<G_FDIV, fdiv>;
+def : GINodeEquiv<G_FREM, frem>;
+def : GINodeEquiv<G_FPOW, fpow>;
def : GINodeEquiv<G_BR, br>;
// Specifies the GlobalISel equivalents for SelectionDAG's ComplexPattern.
diff --git a/contrib/llvm/include/llvm/Target/Target.td b/contrib/llvm/include/llvm/Target/Target.td
index d7fbca93f59b..fc35b4527bc3 100644
--- a/contrib/llvm/include/llvm/Target/Target.td
+++ b/contrib/llvm/include/llvm/Target/Target.td
@@ -1002,6 +1002,16 @@ def PATCHABLE_TAIL_CALL : Instruction {
let hasSideEffects = 1;
let isReturn = 1;
}
+def PATCHABLE_EVENT_CALL : Instruction {
+ let OutOperandList = (outs);
+ let InOperandList = (ins ptr_rc:$event, i8imm:$size);
+ let AsmString = "# XRay Custom Event Log.";
+ let usesCustomInserter = 1;
+ let isCall = 1;
+ let mayLoad = 1;
+ let mayStore = 1;
+ let hasSideEffects = 1;
+}
def FENTRY_CALL : Instruction {
let OutOperandList = (outs unknown:$dst);
let InOperandList = (ins variable_ops);
diff --git a/contrib/llvm/include/llvm/Target/TargetOpcodes.def b/contrib/llvm/include/llvm/Target/TargetOpcodes.def
index 96db6e0a9769..36764249632d 100644
--- a/contrib/llvm/include/llvm/Target/TargetOpcodes.def
+++ b/contrib/llvm/include/llvm/Target/TargetOpcodes.def
@@ -182,6 +182,10 @@ HANDLE_TARGET_OPCODE(PATCHABLE_FUNCTION_EXIT)
/// PATCHABLE_RET which specifically only works for return instructions.
HANDLE_TARGET_OPCODE(PATCHABLE_TAIL_CALL)
+/// Wraps a logging call and its arguments with nop sleds. At runtime, this can be
+/// patched to insert instrumentation instructions.
+HANDLE_TARGET_OPCODE(PATCHABLE_EVENT_CALL)
+
/// The following generic opcodes are not supposed to appear after ISel.
/// This is something we might want to relax, but for now, this is convenient
/// to produce diagnostics.
diff --git a/contrib/llvm/include/llvm/Transforms/Instrumentation.h b/contrib/llvm/include/llvm/Transforms/Instrumentation.h
index db6723da1e61..023d7af7f729 100644
--- a/contrib/llvm/include/llvm/Transforms/Instrumentation.h
+++ b/contrib/llvm/include/llvm/Transforms/Instrumentation.h
@@ -177,6 +177,7 @@ struct SanitizerCoverageOptions {
bool Use8bitCounters = false;
bool TracePC = false;
bool TracePCGuard = false;
+ bool NoPrune = false;
SanitizerCoverageOptions() = default;
};
diff --git a/contrib/llvm/include/llvm/Transforms/Scalar/Float2Int.h b/contrib/llvm/include/llvm/Transforms/Scalar/Float2Int.h
index a8042399fb08..206ee980109b 100644
--- a/contrib/llvm/include/llvm/Transforms/Scalar/Float2Int.h
+++ b/contrib/llvm/include/llvm/Transforms/Scalar/Float2Int.h
@@ -31,7 +31,7 @@ public:
private:
void findRoots(Function &F, SmallPtrSet<Instruction *, 8> &Roots);
- ConstantRange seen(Instruction *I, ConstantRange R);
+ void seen(Instruction *I, ConstantRange R);
ConstantRange badRange();
ConstantRange unknownRange();
ConstantRange validateRange(ConstantRange R);
diff --git a/contrib/llvm/lib/Analysis/ConstantFolding.cpp b/contrib/llvm/lib/Analysis/ConstantFolding.cpp
index 863fbdba7e67..130e917e49d7 100644
--- a/contrib/llvm/lib/Analysis/ConstantFolding.cpp
+++ b/contrib/llvm/lib/Analysis/ConstantFolding.cpp
@@ -701,11 +701,10 @@ Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0, Constant *Op1,
return Op1;
}
- APInt KnownZero = Known0.Zero | Known1.Zero;
- APInt KnownOne = Known0.One & Known1.One;
- if ((KnownZero | KnownOne).isAllOnesValue()) {
- return ConstantInt::get(Op0->getType(), KnownOne);
- }
+ Known0.Zero |= Known1.Zero;
+ Known0.One &= Known1.One;
+ if (Known0.isConstant())
+ return ConstantInt::get(Op0->getType(), Known0.getConstant());
}
// If the constant expr is something like &A[123] - &A[4].f, fold this into a
diff --git a/contrib/llvm/lib/Analysis/InstructionSimplify.cpp b/contrib/llvm/lib/Analysis/InstructionSimplify.cpp
index 7aa6abf8fa48..4a713f441ce8 100644
--- a/contrib/llvm/lib/Analysis/InstructionSimplify.cpp
+++ b/contrib/llvm/lib/Analysis/InstructionSimplify.cpp
@@ -1495,36 +1495,87 @@ static Value *simplifyAndOfICmpsWithSameOperands(ICmpInst *Op0, ICmpInst *Op1) {
/// Commuted variants are assumed to be handled by calling this function again
/// with the parameters swapped.
-static Value *SimplifyAndOfICmps(ICmpInst *Op0, ICmpInst *Op1) {
+static Value *simplifyOrOfICmpsWithSameOperands(ICmpInst *Op0, ICmpInst *Op1) {
+ ICmpInst::Predicate Pred0, Pred1;
+ Value *A ,*B;
+ if (!match(Op0, m_ICmp(Pred0, m_Value(A), m_Value(B))) ||
+ !match(Op1, m_ICmp(Pred1, m_Specific(A), m_Specific(B))))
+ return nullptr;
+
+ // We have (icmp Pred0, A, B) | (icmp Pred1, A, B).
+ // If Op1 is always implied true by Op0, then Op0 is a subset of Op1, and we
+ // can eliminate Op0 from this 'or'.
+ if (ICmpInst::isImpliedTrueByMatchingCmp(Pred0, Pred1))
+ return Op1;
+
+ // Check for any combination of predicates that cover the entire range of
+ // possibilities.
+ if ((Pred0 == ICmpInst::getInversePredicate(Pred1)) ||
+ (Pred0 == ICmpInst::ICMP_NE && ICmpInst::isTrueWhenEqual(Pred1)) ||
+ (Pred0 == ICmpInst::ICMP_SLE && Pred1 == ICmpInst::ICMP_SGE) ||
+ (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_UGE))
+ return getTrue(Op0->getType());
+
+ return nullptr;
+}
+
+/// Test if a pair of compares with a shared operand and 2 constants has an
+/// empty set intersection, full set union, or if one compare is a superset of
+/// the other.
+static Value *simplifyAndOrOfICmpsWithConstants(ICmpInst *Cmp0, ICmpInst *Cmp1,
+ bool IsAnd) {
+ // Look for this pattern: {and/or} (icmp X, C0), (icmp X, C1)).
+ if (Cmp0->getOperand(0) != Cmp1->getOperand(0))
+ return nullptr;
+
+ const APInt *C0, *C1;
+ if (!match(Cmp0->getOperand(1), m_APInt(C0)) ||
+ !match(Cmp1->getOperand(1), m_APInt(C1)))
+ return nullptr;
+
+ auto Range0 = ConstantRange::makeExactICmpRegion(Cmp0->getPredicate(), *C0);
+ auto Range1 = ConstantRange::makeExactICmpRegion(Cmp1->getPredicate(), *C1);
+
+  // For and-of-compares, check if the intersection is empty:
+ // (icmp X, C0) && (icmp X, C1) --> empty set --> false
+ if (IsAnd && Range0.intersectWith(Range1).isEmptySet())
+ return getFalse(Cmp0->getType());
+
+ // For or-of-compares, check if the union is full:
+ // (icmp X, C0) || (icmp X, C1) --> full set --> true
+ if (!IsAnd && Range0.unionWith(Range1).isFullSet())
+ return getTrue(Cmp0->getType());
+
+ // Is one range a superset of the other?
+ // If this is and-of-compares, take the smaller set:
+ // (icmp sgt X, 4) && (icmp sgt X, 42) --> icmp sgt X, 42
+ // If this is or-of-compares, take the larger set:
+ // (icmp sgt X, 4) || (icmp sgt X, 42) --> icmp sgt X, 4
+ if (Range0.contains(Range1))
+ return IsAnd ? Cmp1 : Cmp0;
+ if (Range1.contains(Range0))
+ return IsAnd ? Cmp0 : Cmp1;
+
+ return nullptr;
+}
+
+/// Commuted variants are assumed to be handled by calling this function again
+/// with the parameters swapped.
+static Value *simplifyAndOfICmps(ICmpInst *Op0, ICmpInst *Op1) {
if (Value *X = simplifyUnsignedRangeCheck(Op0, Op1, /*IsAnd=*/true))
return X;
if (Value *X = simplifyAndOfICmpsWithSameOperands(Op0, Op1))
return X;
- // FIXME: This should be shared with or-of-icmps.
- // Look for this pattern: (icmp V, C0) & (icmp V, C1)).
+ if (Value *X = simplifyAndOrOfICmpsWithConstants(Op0, Op1, true))
+ return X;
+
+ // (icmp (add V, C0), C1) & (icmp V, C0)
Type *ITy = Op0->getType();
ICmpInst::Predicate Pred0, Pred1;
const APInt *C0, *C1;
Value *V;
- if (match(Op0, m_ICmp(Pred0, m_Value(V), m_APInt(C0))) &&
- match(Op1, m_ICmp(Pred1, m_Specific(V), m_APInt(C1)))) {
- // Make a constant range that's the intersection of the two icmp ranges.
- // If the intersection is empty, we know that the result is false.
- auto Range0 = ConstantRange::makeExactICmpRegion(Pred0, *C0);
- auto Range1 = ConstantRange::makeExactICmpRegion(Pred1, *C1);
- if (Range0.intersectWith(Range1).isEmptySet())
- return getFalse(ITy);
-
- // If a range is a superset of the other, the smaller set is all we need.
- if (Range0.contains(Range1))
- return Op1;
- if (Range1.contains(Range0))
- return Op0;
- }
-
- // (icmp (add V, C0), C1) & (icmp V, C0)
if (!match(Op0, m_ICmp(Pred0, m_Add(m_Value(V), m_APInt(C0)), m_APInt(C1))))
return nullptr;
@@ -1565,6 +1616,103 @@ static Value *SimplifyAndOfICmps(ICmpInst *Op0, ICmpInst *Op1) {
return nullptr;
}
+/// Commuted variants are assumed to be handled by calling this function again
+/// with the parameters swapped.
+static Value *simplifyOrOfICmps(ICmpInst *Op0, ICmpInst *Op1) {
+ if (Value *X = simplifyUnsignedRangeCheck(Op0, Op1, /*IsAnd=*/false))
+ return X;
+
+ if (Value *X = simplifyOrOfICmpsWithSameOperands(Op0, Op1))
+ return X;
+
+ if (Value *X = simplifyAndOrOfICmpsWithConstants(Op0, Op1, false))
+ return X;
+
+ // (icmp (add V, C0), C1) | (icmp V, C0)
+ ICmpInst::Predicate Pred0, Pred1;
+ const APInt *C0, *C1;
+ Value *V;
+ if (!match(Op0, m_ICmp(Pred0, m_Add(m_Value(V), m_APInt(C0)), m_APInt(C1))))
+ return nullptr;
+
+ if (!match(Op1, m_ICmp(Pred1, m_Specific(V), m_Value())))
+ return nullptr;
+
+ auto *AddInst = cast<BinaryOperator>(Op0->getOperand(0));
+ if (AddInst->getOperand(1) != Op1->getOperand(1))
+ return nullptr;
+
+ Type *ITy = Op0->getType();
+ bool isNSW = AddInst->hasNoSignedWrap();
+ bool isNUW = AddInst->hasNoUnsignedWrap();
+
+ const APInt Delta = *C1 - *C0;
+ if (C0->isStrictlyPositive()) {
+ if (Delta == 2) {
+ if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_SLE)
+ return getTrue(ITy);
+ if (Pred0 == ICmpInst::ICMP_SGE && Pred1 == ICmpInst::ICMP_SLE && isNSW)
+ return getTrue(ITy);
+ }
+ if (Delta == 1) {
+ if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_SLE)
+ return getTrue(ITy);
+ if (Pred0 == ICmpInst::ICMP_SGT && Pred1 == ICmpInst::ICMP_SLE && isNSW)
+ return getTrue(ITy);
+ }
+ }
+ if (C0->getBoolValue() && isNUW) {
+ if (Delta == 2)
+ if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_ULE)
+ return getTrue(ITy);
+ if (Delta == 1)
+ if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_ULE)
+ return getTrue(ITy);
+ }
+
+ return nullptr;
+}
+
+static Value *simplifyPossiblyCastedAndOrOfICmps(ICmpInst *Cmp0, ICmpInst *Cmp1,
+ bool IsAnd, CastInst *Cast) {
+ Value *V =
+ IsAnd ? simplifyAndOfICmps(Cmp0, Cmp1) : simplifyOrOfICmps(Cmp0, Cmp1);
+ if (!V)
+ return nullptr;
+ if (!Cast)
+ return V;
+
+ // If we looked through casts, we can only handle a constant simplification
+ // because we are not allowed to create a cast instruction here.
+ if (auto *C = dyn_cast<Constant>(V))
+ return ConstantExpr::getCast(Cast->getOpcode(), C, Cast->getType());
+
+ return nullptr;
+}
+
+static Value *simplifyAndOrOfICmps(Value *Op0, Value *Op1, bool IsAnd) {
+ // Look through casts of the 'and' operands to find compares.
+ auto *Cast0 = dyn_cast<CastInst>(Op0);
+ auto *Cast1 = dyn_cast<CastInst>(Op1);
+ if (Cast0 && Cast1 && Cast0->getOpcode() == Cast1->getOpcode() &&
+ Cast0->getSrcTy() == Cast1->getSrcTy()) {
+ Op0 = Cast0->getOperand(0);
+ Op1 = Cast1->getOperand(0);
+ }
+
+ auto *Cmp0 = dyn_cast<ICmpInst>(Op0);
+ auto *Cmp1 = dyn_cast<ICmpInst>(Op1);
+ if (!Cmp0 || !Cmp1)
+ return nullptr;
+
+ if (Value *V = simplifyPossiblyCastedAndOrOfICmps(Cmp0, Cmp1, IsAnd, Cast0))
+ return V;
+ if (Value *V = simplifyPossiblyCastedAndOrOfICmps(Cmp1, Cmp0, IsAnd, Cast0))
+ return V;
+
+ return nullptr;
+}
+
/// Given operands for an And, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifyAndInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
@@ -1615,32 +1763,8 @@ static Value *SimplifyAndInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
return Op1;
}
- if (auto *ICILHS = dyn_cast<ICmpInst>(Op0)) {
- if (auto *ICIRHS = dyn_cast<ICmpInst>(Op1)) {
- if (Value *V = SimplifyAndOfICmps(ICILHS, ICIRHS))
- return V;
- if (Value *V = SimplifyAndOfICmps(ICIRHS, ICILHS))
- return V;
- }
- }
-
- // The compares may be hidden behind casts. Look through those and try the
- // same folds as above.
- auto *Cast0 = dyn_cast<CastInst>(Op0);
- auto *Cast1 = dyn_cast<CastInst>(Op1);
- if (Cast0 && Cast1 && Cast0->getOpcode() == Cast1->getOpcode() &&
- Cast0->getSrcTy() == Cast1->getSrcTy()) {
- auto *Cmp0 = dyn_cast<ICmpInst>(Cast0->getOperand(0));
- auto *Cmp1 = dyn_cast<ICmpInst>(Cast1->getOperand(0));
- if (Cmp0 && Cmp1) {
- Instruction::CastOps CastOpc = Cast0->getOpcode();
- Type *ResultType = Cast0->getType();
- if (auto *V = dyn_cast_or_null<Constant>(SimplifyAndOfICmps(Cmp0, Cmp1)))
- return ConstantExpr::getCast(CastOpc, V, ResultType);
- if (auto *V = dyn_cast_or_null<Constant>(SimplifyAndOfICmps(Cmp1, Cmp0)))
- return ConstantExpr::getCast(CastOpc, V, ResultType);
- }
- }
+ if (Value *V = simplifyAndOrOfICmps(Op0, Op1, true))
+ return V;
// Try some generic simplifications for associative operations.
if (Value *V = SimplifyAssociativeBinOp(Instruction::And, Op0, Op1, Q,
@@ -1678,86 +1802,6 @@ Value *llvm::SimplifyAndInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
return ::SimplifyAndInst(Op0, Op1, Q, RecursionLimit);
}
-/// Commuted variants are assumed to be handled by calling this function again
-/// with the parameters swapped.
-static Value *simplifyOrOfICmpsWithSameOperands(ICmpInst *Op0, ICmpInst *Op1) {
- ICmpInst::Predicate Pred0, Pred1;
- Value *A ,*B;
- if (!match(Op0, m_ICmp(Pred0, m_Value(A), m_Value(B))) ||
- !match(Op1, m_ICmp(Pred1, m_Specific(A), m_Specific(B))))
- return nullptr;
-
- // We have (icmp Pred0, A, B) | (icmp Pred1, A, B).
- // If Op1 is always implied true by Op0, then Op0 is a subset of Op1, and we
- // can eliminate Op0 from this 'or'.
- if (ICmpInst::isImpliedTrueByMatchingCmp(Pred0, Pred1))
- return Op1;
-
- // Check for any combination of predicates that cover the entire range of
- // possibilities.
- if ((Pred0 == ICmpInst::getInversePredicate(Pred1)) ||
- (Pred0 == ICmpInst::ICMP_NE && ICmpInst::isTrueWhenEqual(Pred1)) ||
- (Pred0 == ICmpInst::ICMP_SLE && Pred1 == ICmpInst::ICMP_SGE) ||
- (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_UGE))
- return getTrue(Op0->getType());
-
- return nullptr;
-}
-
-/// Commuted variants are assumed to be handled by calling this function again
-/// with the parameters swapped.
-static Value *SimplifyOrOfICmps(ICmpInst *Op0, ICmpInst *Op1) {
- if (Value *X = simplifyUnsignedRangeCheck(Op0, Op1, /*IsAnd=*/false))
- return X;
-
- if (Value *X = simplifyOrOfICmpsWithSameOperands(Op0, Op1))
- return X;
-
- // (icmp (add V, C0), C1) | (icmp V, C0)
- ICmpInst::Predicate Pred0, Pred1;
- const APInt *C0, *C1;
- Value *V;
- if (!match(Op0, m_ICmp(Pred0, m_Add(m_Value(V), m_APInt(C0)), m_APInt(C1))))
- return nullptr;
-
- if (!match(Op1, m_ICmp(Pred1, m_Specific(V), m_Value())))
- return nullptr;
-
- auto *AddInst = cast<BinaryOperator>(Op0->getOperand(0));
- if (AddInst->getOperand(1) != Op1->getOperand(1))
- return nullptr;
-
- Type *ITy = Op0->getType();
- bool isNSW = AddInst->hasNoSignedWrap();
- bool isNUW = AddInst->hasNoUnsignedWrap();
-
- const APInt Delta = *C1 - *C0;
- if (C0->isStrictlyPositive()) {
- if (Delta == 2) {
- if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_SLE)
- return getTrue(ITy);
- if (Pred0 == ICmpInst::ICMP_SGE && Pred1 == ICmpInst::ICMP_SLE && isNSW)
- return getTrue(ITy);
- }
- if (Delta == 1) {
- if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_SLE)
- return getTrue(ITy);
- if (Pred0 == ICmpInst::ICMP_SGT && Pred1 == ICmpInst::ICMP_SLE && isNSW)
- return getTrue(ITy);
- }
- }
- if (C0->getBoolValue() && isNUW) {
- if (Delta == 2)
- if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_ULE)
- return getTrue(ITy);
- if (Delta == 1)
- if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_ULE)
- return getTrue(ITy);
- }
-
- return nullptr;
-}
-
/// Given operands for an Or, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifyOrInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
@@ -1826,14 +1870,8 @@ static Value *SimplifyOrInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
match(Op1, m_c_And(m_Not(m_Specific(A)), m_Specific(B)))))
return Op0;
- if (auto *ICILHS = dyn_cast<ICmpInst>(Op0)) {
- if (auto *ICIRHS = dyn_cast<ICmpInst>(Op1)) {
- if (Value *V = SimplifyOrOfICmps(ICILHS, ICIRHS))
- return V;
- if (Value *V = SimplifyOrOfICmps(ICIRHS, ICILHS))
- return V;
- }
- }
+ if (Value *V = simplifyAndOrOfICmps(Op0, Op1, false))
+ return V;
// Try some generic simplifications for associative operations.
if (Value *V = SimplifyAssociativeBinOp(Instruction::Or, Op0, Op1, Q,
@@ -4056,20 +4094,13 @@ static Value *SimplifyShuffleVectorInst(Value *Op0, Value *Op1, Constant *Mask,
unsigned MaskNumElts = Mask->getType()->getVectorNumElements();
unsigned InVecNumElts = InVecTy->getVectorNumElements();
- auto *Op0Const = dyn_cast<Constant>(Op0);
- auto *Op1Const = dyn_cast<Constant>(Op1);
-
- // If all operands are constant, constant fold the shuffle.
- if (Op0Const && Op1Const)
- return ConstantFoldShuffleVectorInstruction(Op0Const, Op1Const, Mask);
-
SmallVector<int, 32> Indices;
ShuffleVectorInst::getShuffleMask(Mask, Indices);
assert(MaskNumElts == Indices.size() &&
"Size of Indices not same as number of mask elements?");
- // If only one of the operands is constant, constant fold the shuffle if the
- // mask does not select elements from the variable operand.
+ // Canonicalization: If mask does not select elements from an input vector,
+ // replace that input vector with undef.
bool MaskSelects0 = false, MaskSelects1 = false;
for (unsigned i = 0; i != MaskNumElts; ++i) {
if (Indices[i] == -1)
@@ -4079,23 +4110,41 @@ static Value *SimplifyShuffleVectorInst(Value *Op0, Value *Op1, Constant *Mask,
else
MaskSelects1 = true;
}
- if (!MaskSelects0 && Op1Const)
- return ConstantFoldShuffleVectorInstruction(UndefValue::get(InVecTy),
- Op1Const, Mask);
- if (!MaskSelects1 && Op0Const)
- return ConstantFoldShuffleVectorInstruction(Op0Const,
- UndefValue::get(InVecTy), Mask);
+ if (!MaskSelects0)
+ Op0 = UndefValue::get(InVecTy);
+ if (!MaskSelects1)
+ Op1 = UndefValue::get(InVecTy);
+
+ auto *Op0Const = dyn_cast<Constant>(Op0);
+ auto *Op1Const = dyn_cast<Constant>(Op1);
+
+ // If all operands are constant, constant fold the shuffle.
+ if (Op0Const && Op1Const)
+ return ConstantFoldShuffleVectorInstruction(Op0Const, Op1Const, Mask);
+
+ // Canonicalization: if only one input vector is constant, it shall be the
+ // second one.
+ if (Op0Const && !Op1Const) {
+ std::swap(Op0, Op1);
+ for (int &Idx : Indices) {
+ if (Idx == -1)
+ continue;
+ Idx = Idx < (int)InVecNumElts ? Idx + InVecNumElts : Idx - InVecNumElts;
+ assert(Idx >= 0 && Idx < (int)InVecNumElts * 2 &&
+ "shufflevector mask index out of range");
+ }
+ Mask = ConstantDataVector::get(
+ Mask->getContext(),
+ makeArrayRef(reinterpret_cast<uint32_t *>(Indices.data()),
+ MaskNumElts));
+ }
// A shuffle of a splat is always the splat itself. Legal if the shuffle's
// value type is same as the input vectors' type.
if (auto *OpShuf = dyn_cast<ShuffleVectorInst>(Op0))
- if (!MaskSelects1 && RetTy == InVecTy &&
+ if (isa<UndefValue>(Op1) && RetTy == InVecTy &&
OpShuf->getMask()->getSplatValue())
return Op0;
- if (auto *OpShuf = dyn_cast<ShuffleVectorInst>(Op1))
- if (!MaskSelects0 && RetTy == InVecTy &&
- OpShuf->getMask()->getSplatValue())
- return Op1;
// Don't fold a shuffle with undef mask elements. This may get folded in a
// better way using demanded bits or other analysis.
@@ -4595,8 +4644,8 @@ Value *llvm::SimplifyInstruction(Instruction *I, const SimplifyQuery &SQ,
unsigned BitWidth = I->getType()->getScalarSizeInBits();
KnownBits Known(BitWidth);
computeKnownBits(I, Known, Q.DL, /*Depth*/ 0, Q.AC, I, Q.DT, ORE);
- if ((Known.Zero | Known.One).isAllOnesValue())
- Result = ConstantInt::get(I->getType(), Known.One);
+ if (Known.isConstant())
+ Result = ConstantInt::get(I->getType(), Known.getConstant());
}
/// If called on unreachable code, the above logic may report that the
diff --git a/contrib/llvm/lib/Analysis/LazyValueInfo.cpp b/contrib/llvm/lib/Analysis/LazyValueInfo.cpp
index a98383eaf4aa..a2b9015a8a1d 100644
--- a/contrib/llvm/lib/Analysis/LazyValueInfo.cpp
+++ b/contrib/llvm/lib/Analysis/LazyValueInfo.cpp
@@ -142,7 +142,7 @@ public:
return Val;
}
- ConstantRange getConstantRange() const {
+ const ConstantRange &getConstantRange() const {
assert(isConstantRange() &&
"Cannot get the constant-range of a non-constant-range!");
return Range;
@@ -250,7 +250,7 @@ public:
if (NewR.isFullSet())
markOverdefined();
else
- markConstantRange(NewR);
+ markConstantRange(std::move(NewR));
}
};
@@ -1079,8 +1079,8 @@ bool LazyValueInfoImpl::solveBlockValueSelect(LVILatticeVal &BBLV,
}
if (TrueVal.isConstantRange() && FalseVal.isConstantRange()) {
- ConstantRange TrueCR = TrueVal.getConstantRange();
- ConstantRange FalseCR = FalseVal.getConstantRange();
+ const ConstantRange &TrueCR = TrueVal.getConstantRange();
+ const ConstantRange &FalseCR = FalseVal.getConstantRange();
Value *LHS = nullptr;
Value *RHS = nullptr;
SelectPatternResult SPR = matchSelectPattern(SI, LHS, RHS);
@@ -1649,7 +1649,7 @@ Constant *LazyValueInfo::getConstant(Value *V, BasicBlock *BB,
if (Result.isConstant())
return Result.getConstant();
if (Result.isConstantRange()) {
- ConstantRange CR = Result.getConstantRange();
+ const ConstantRange &CR = Result.getConstantRange();
if (const APInt *SingleVal = CR.getSingleElement())
return ConstantInt::get(V->getContext(), *SingleVal);
}
@@ -1686,7 +1686,7 @@ Constant *LazyValueInfo::getConstantOnEdge(Value *V, BasicBlock *FromBB,
if (Result.isConstant())
return Result.getConstant();
if (Result.isConstantRange()) {
- ConstantRange CR = Result.getConstantRange();
+ const ConstantRange &CR = Result.getConstantRange();
if (const APInt *SingleVal = CR.getSingleElement())
return ConstantInt::get(V->getContext(), *SingleVal);
}
@@ -1712,7 +1712,7 @@ static LazyValueInfo::Tristate getPredicateResult(unsigned Pred, Constant *C,
ConstantInt *CI = dyn_cast<ConstantInt>(C);
if (!CI) return LazyValueInfo::Unknown;
- ConstantRange CR = Result.getConstantRange();
+ const ConstantRange &CR = Result.getConstantRange();
if (Pred == ICmpInst::ICMP_EQ) {
if (!CR.contains(CI->getValue()))
return LazyValueInfo::False;
diff --git a/contrib/llvm/lib/Analysis/Lint.cpp b/contrib/llvm/lib/Analysis/Lint.cpp
index 598138246445..471ccb62970d 100644
--- a/contrib/llvm/lib/Analysis/Lint.cpp
+++ b/contrib/llvm/lib/Analysis/Lint.cpp
@@ -537,7 +537,7 @@ static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT,
unsigned BitWidth = V->getType()->getIntegerBitWidth();
KnownBits Known(BitWidth);
computeKnownBits(V, Known, DL, 0, AC, dyn_cast<Instruction>(V), DT);
- return Known.Zero.isAllOnesValue();
+ return Known.isZero();
}
// Per-component check doesn't work with zeroinitializer
@@ -558,7 +558,7 @@ static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT,
KnownBits Known(BitWidth);
computeKnownBits(Elem, Known, DL);
- if (Known.Zero.isAllOnesValue())
+ if (Known.isZero())
return true;
}
diff --git a/contrib/llvm/lib/Analysis/ModuleSummaryAnalysis.cpp b/contrib/llvm/lib/Analysis/ModuleSummaryAnalysis.cpp
index a83412506a07..99f900ae3932 100644
--- a/contrib/llvm/lib/Analysis/ModuleSummaryAnalysis.cpp
+++ b/contrib/llvm/lib/Analysis/ModuleSummaryAnalysis.cpp
@@ -37,7 +37,8 @@ using namespace llvm;
// Walk through the operands of a given User via worklist iteration and populate
// the set of GlobalValue references encountered. Invoked either on an
// Instruction or a GlobalVariable (which walks its initializer).
-static void findRefEdges(const User *CurUser, SetVector<ValueInfo> &RefEdges,
+static void findRefEdges(ModuleSummaryIndex &Index, const User *CurUser,
+ SetVector<ValueInfo> &RefEdges,
SmallPtrSet<const User *, 8> &Visited) {
SmallVector<const User *, 32> Worklist;
Worklist.push_back(CurUser);
@@ -61,7 +62,7 @@ static void findRefEdges(const User *CurUser, SetVector<ValueInfo> &RefEdges,
// the reference set unless it is a callee. Callees are handled
// specially by WriteFunction and are added to a separate list.
if (!(CS && CS.isCallee(&OI)))
- RefEdges.insert(GV);
+ RefEdges.insert(Index.getOrInsertValueInfo(GV));
continue;
}
Worklist.push_back(Operand);
@@ -198,7 +199,7 @@ computeFunctionSummary(ModuleSummaryIndex &Index, const Module &M,
if (isa<DbgInfoIntrinsic>(I))
continue;
++NumInsts;
- findRefEdges(&I, RefEdges, Visited);
+ findRefEdges(Index, &I, RefEdges, Visited);
auto CS = ImmutableCallSite(&I);
if (!CS)
continue;
@@ -239,7 +240,9 @@ computeFunctionSummary(ModuleSummaryIndex &Index, const Module &M,
// to record the call edge to the alias in that case. Eventually
// an alias summary will be created to associate the alias and
// aliasee.
- CallGraphEdges[cast<GlobalValue>(CalledValue)].updateHotness(Hotness);
+ CallGraphEdges[Index.getOrInsertValueInfo(
+ cast<GlobalValue>(CalledValue))]
+ .updateHotness(Hotness);
} else {
// Skip inline assembly calls.
if (CI && CI->isInlineAsm())
@@ -254,15 +257,16 @@ computeFunctionSummary(ModuleSummaryIndex &Index, const Module &M,
ICallAnalysis.getPromotionCandidatesForInstruction(
&I, NumVals, TotalCount, NumCandidates);
for (auto &Candidate : CandidateProfileData)
- CallGraphEdges[Candidate.Value].updateHotness(
- getHotness(Candidate.Count, PSI));
+ CallGraphEdges[Index.getOrInsertValueInfo(Candidate.Value)]
+ .updateHotness(getHotness(Candidate.Count, PSI));
}
}
// Explicit add hot edges to enforce importing for designated GUIDs for
// sample PGO, to enable the same inlines as the profiled optimized binary.
for (auto &I : F.getImportGUIDs())
- CallGraphEdges[I].updateHotness(CalleeInfo::HotnessType::Hot);
+ CallGraphEdges[Index.getOrInsertValueInfo(I)].updateHotness(
+ CalleeInfo::HotnessType::Hot);
bool NonRenamableLocal = isNonRenamableLocal(F);
bool NotEligibleForImport =
@@ -288,7 +292,7 @@ computeVariableSummary(ModuleSummaryIndex &Index, const GlobalVariable &V,
DenseSet<GlobalValue::GUID> &CantBePromoted) {
SetVector<ValueInfo> RefEdges;
SmallPtrSet<const User *, 8> Visited;
- findRefEdges(&V, RefEdges, Visited);
+ findRefEdges(Index, &V, RefEdges, Visited);
bool NonRenamableLocal = isNonRenamableLocal(V);
GlobalValueSummary::GVFlags Flags(V.getLinkage(), NonRenamableLocal,
/* LiveRoot = */ false);
@@ -317,12 +321,9 @@ computeAliasSummary(ModuleSummaryIndex &Index, const GlobalAlias &A,
// Set LiveRoot flag on entries matching the given value name.
static void setLiveRoot(ModuleSummaryIndex &Index, StringRef Name) {
- auto SummaryList =
- Index.findGlobalValueSummaryList(GlobalValue::getGUID(Name));
- if (SummaryList == Index.end())
- return;
- for (auto &Summary : SummaryList->second)
- Summary->setLiveRoot();
+ if (ValueInfo VI = Index.getValueInfo(GlobalValue::getGUID(Name)))
+ for (auto &Summary : VI.getSummaryList())
+ Summary->setLiveRoot();
}
ModuleSummaryIndex llvm::buildModuleSummaryIndex(
@@ -446,12 +447,16 @@ ModuleSummaryIndex llvm::buildModuleSummaryIndex(
}
for (auto &GlobalList : Index) {
- assert(GlobalList.second.size() == 1 &&
+ // Ignore entries for references that are undefined in the current module.
+ if (GlobalList.second.SummaryList.empty())
+ continue;
+
+ assert(GlobalList.second.SummaryList.size() == 1 &&
"Expected module's index to have one summary per GUID");
- auto &Summary = GlobalList.second[0];
+ auto &Summary = GlobalList.second.SummaryList[0];
bool AllRefsCanBeExternallyReferenced =
llvm::all_of(Summary->refs(), [&](const ValueInfo &VI) {
- return !CantBePromoted.count(VI.getValue()->getGUID());
+ return !CantBePromoted.count(VI.getGUID());
});
if (!AllRefsCanBeExternallyReferenced) {
Summary->setNotEligibleToImport();
@@ -461,9 +466,7 @@ ModuleSummaryIndex llvm::buildModuleSummaryIndex(
if (auto *FuncSummary = dyn_cast<FunctionSummary>(Summary.get())) {
bool AllCallsCanBeExternallyReferenced = llvm::all_of(
FuncSummary->calls(), [&](const FunctionSummary::EdgeTy &Edge) {
- auto GUID = Edge.first.isGUID() ? Edge.first.getGUID()
- : Edge.first.getValue()->getGUID();
- return !CantBePromoted.count(GUID);
+ return !CantBePromoted.count(Edge.first.getGUID());
});
if (!AllCallsCanBeExternallyReferenced)
Summary->setNotEligibleToImport();
diff --git a/contrib/llvm/lib/Analysis/ScalarEvolution.cpp b/contrib/llvm/lib/Analysis/ScalarEvolution.cpp
index bd747f7c0b7a..01dca0793145 100644
--- a/contrib/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/contrib/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -2970,7 +2970,7 @@ static const APInt gcd(const SCEVConstant *C1, const SCEVConstant *C2) {
else if (ABW < BBW)
A = A.zext(BBW);
- return APIntOps::GreatestCommonDivisor(A, B);
+ return APIntOps::GreatestCommonDivisor(std::move(A), std::move(B));
}
/// Get a canonical unsigned division expression, or something simpler if
@@ -4083,6 +4083,56 @@ static Optional<BinaryOp> MatchBinaryOp(Value *V, DominatorTree &DT) {
return None;
}
+/// A helper function for createAddRecFromPHI to handle simple cases.
+///
+/// This function tries to find an AddRec expression for the simplest (yet most
+/// common) cases: PN = PHI(Start, OP(Self, LoopInvariant)).
+/// If it fails, createAddRecFromPHI will use a more general, but slow,
+/// technique for finding the AddRec expression.
+const SCEV *ScalarEvolution::createSimpleAffineAddRec(PHINode *PN,
+ Value *BEValueV,
+ Value *StartValueV) {
+ const Loop *L = LI.getLoopFor(PN->getParent());
+ assert(L && L->getHeader() == PN->getParent());
+ assert(BEValueV && StartValueV);
+
+ auto BO = MatchBinaryOp(BEValueV, DT);
+ if (!BO)
+ return nullptr;
+
+ if (BO->Opcode != Instruction::Add)
+ return nullptr;
+
+ const SCEV *Accum = nullptr;
+ if (BO->LHS == PN && L->isLoopInvariant(BO->RHS))
+ Accum = getSCEV(BO->RHS);
+ else if (BO->RHS == PN && L->isLoopInvariant(BO->LHS))
+ Accum = getSCEV(BO->LHS);
+
+ if (!Accum)
+ return nullptr;
+
+ SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
+ if (BO->IsNUW)
+ Flags = setFlags(Flags, SCEV::FlagNUW);
+ if (BO->IsNSW)
+ Flags = setFlags(Flags, SCEV::FlagNSW);
+
+ const SCEV *StartVal = getSCEV(StartValueV);
+ const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);
+
+ ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;
+
+ // We can add Flags to the post-inc expression only if we
+ // know that it is *undefined behavior* for BEValueV to
+ // overflow.
+ if (auto *BEInst = dyn_cast<Instruction>(BEValueV))
+ if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L))
+ (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags);
+
+ return PHISCEV;
+}
+
const SCEV *ScalarEvolution::createAddRecFromPHI(PHINode *PN) {
const Loop *L = LI.getLoopFor(PN->getParent());
if (!L || L->getHeader() != PN->getParent())
@@ -4111,10 +4161,16 @@ const SCEV *ScalarEvolution::createAddRecFromPHI(PHINode *PN) {
if (!BEValueV || !StartValueV)
return nullptr;
- // While we are analyzing this PHI node, handle its value symbolically.
- const SCEV *SymbolicName = getUnknown(PN);
assert(ValueExprMap.find_as(PN) == ValueExprMap.end() &&
"PHI node already processed?");
+
+  // First, try to find AddRec expression without creating a fictitious symbolic
+ // value for PN.
+ if (auto *S = createSimpleAffineAddRec(PN, BEValueV, StartValueV))
+ return S;
+
+ // Handle PHI node value symbolically.
+ const SCEV *SymbolicName = getUnknown(PN);
ValueExprMap.insert({SCEVCallbackVH(PN, this), SymbolicName});
// Using this symbolic name for the PHI, analyze the value coming around
@@ -4189,7 +4245,7 @@ const SCEV *ScalarEvolution::createAddRecFromPHI(PHINode *PN) {
ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;
// We can add Flags to the post-inc expression only if we
- // know that it us *undefined behavior* for BEValueV to
+ // know that it is *undefined behavior* for BEValueV to
// overflow.
if (auto *BEInst = dyn_cast<Instruction>(BEValueV))
if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L))
@@ -4744,7 +4800,7 @@ ScalarEvolution::getRange(const SCEV *S,
}
}
- return setRange(AddRec, SignHint, ConservativeResult);
+ return setRange(AddRec, SignHint, std::move(ConservativeResult));
}
if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
@@ -4775,10 +4831,10 @@ ScalarEvolution::getRange(const SCEV *S,
APInt::getSignedMaxValue(BitWidth).ashr(NS - 1) + 1));
}
- return setRange(U, SignHint, ConservativeResult);
+ return setRange(U, SignHint, std::move(ConservativeResult));
}
- return setRange(S, SignHint, ConservativeResult);
+ return setRange(S, SignHint, std::move(ConservativeResult));
}
// Given a StartRange, Step and MaxBECount for an expression compute a range of
@@ -4786,8 +4842,8 @@ ScalarEvolution::getRange(const SCEV *S,
// from StartRange and then is changed by Step up to MaxBECount times. Signed
// argument defines if we treat Step as signed or unsigned.
static ConstantRange getRangeForAffineARHelper(APInt Step,
- ConstantRange StartRange,
- APInt MaxBECount,
+ const ConstantRange &StartRange,
+ const APInt &MaxBECount,
unsigned BitWidth, bool Signed) {
// If either Step or MaxBECount is 0, then the expression won't change, and we
// just need to return the initial range.
@@ -4826,8 +4882,8 @@ static ConstantRange getRangeForAffineARHelper(APInt Step,
// if the expression is decreasing and will be increased by Offset otherwise.
APInt StartLower = StartRange.getLower();
APInt StartUpper = StartRange.getUpper() - 1;
- APInt MovedBoundary =
- Descending ? (StartLower - Offset) : (StartUpper + Offset);
+ APInt MovedBoundary = Descending ? (StartLower - std::move(Offset))
+ : (StartUpper + std::move(Offset));
// It's possible that the new minimum/maximum value will fall into the initial
// range (due to wrap around). This means that the expression can take any
@@ -4835,21 +4891,18 @@ static ConstantRange getRangeForAffineARHelper(APInt Step,
if (StartRange.contains(MovedBoundary))
return ConstantRange(BitWidth, /* isFullSet = */ true);
- APInt NewLower, NewUpper;
- if (Descending) {
- NewLower = MovedBoundary;
- NewUpper = StartUpper;
- } else {
- NewLower = StartLower;
- NewUpper = MovedBoundary;
- }
+ APInt NewLower =
+ Descending ? std::move(MovedBoundary) : std::move(StartLower);
+ APInt NewUpper =
+ Descending ? std::move(StartUpper) : std::move(MovedBoundary);
+ NewUpper += 1;
// If we end up with full range, return a proper full range.
- if (NewLower == NewUpper + 1)
+ if (NewLower == NewUpper)
return ConstantRange(BitWidth, /* isFullSet = */ true);
// No overflow detected, return [StartLower, StartUpper + Offset + 1) range.
- return ConstantRange(NewLower, NewUpper + 1);
+ return ConstantRange(std::move(NewLower), std::move(NewUpper));
}
ConstantRange ScalarEvolution::getRangeForAffineAR(const SCEV *Start,
@@ -7323,7 +7376,6 @@ SolveQuadraticEquation(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) {
const APInt &M = MC->getAPInt();
const APInt &N = NC->getAPInt();
APInt Two(BitWidth, 2);
- APInt Four(BitWidth, 4);
{
using namespace APIntOps;
@@ -7339,7 +7391,7 @@ SolveQuadraticEquation(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) {
// Compute the B^2-4ac term.
APInt SqrtTerm(B);
SqrtTerm *= B;
- SqrtTerm -= Four * (A * C);
+ SqrtTerm -= 4 * (A * C);
if (SqrtTerm.isNegative()) {
// The loop is provably infinite.
@@ -8887,7 +8939,7 @@ bool ScalarEvolution::isImpliedCondOperandsViaRanges(ICmpInst::Predicate Pred,
if (!Addend)
return false;
- APInt ConstFoundRHS = cast<SCEVConstant>(FoundRHS)->getAPInt();
+ const APInt &ConstFoundRHS = cast<SCEVConstant>(FoundRHS)->getAPInt();
// `FoundLHSRange` is the range we know `FoundLHS` to be in by virtue of the
// antecedent "`FoundLHS` `Pred` `FoundRHS`".
@@ -8899,7 +8951,7 @@ bool ScalarEvolution::isImpliedCondOperandsViaRanges(ICmpInst::Predicate Pred,
// We can also compute the range of values for `LHS` that satisfy the
// consequent, "`LHS` `Pred` `RHS`":
- APInt ConstRHS = cast<SCEVConstant>(RHS)->getAPInt();
+ const APInt &ConstRHS = cast<SCEVConstant>(RHS)->getAPInt();
ConstantRange SatisfyingLHSRange =
ConstantRange::makeSatisfyingICmpRegion(Pred, ConstRHS);
@@ -8924,7 +8976,7 @@ bool ScalarEvolution::doesIVOverflowOnLT(const SCEV *RHS, const SCEV *Stride,
.getSignedMax();
// SMaxRHS + SMaxStrideMinusOne > SMaxValue => overflow!
- return (MaxValue - MaxStrideMinusOne).slt(MaxRHS);
+ return (std::move(MaxValue) - std::move(MaxStrideMinusOne)).slt(MaxRHS);
}
APInt MaxRHS = getUnsignedRange(RHS).getUnsignedMax();
@@ -8933,7 +8985,7 @@ bool ScalarEvolution::doesIVOverflowOnLT(const SCEV *RHS, const SCEV *Stride,
.getUnsignedMax();
// UMaxRHS + UMaxStrideMinusOne > UMaxValue => overflow!
- return (MaxValue - MaxStrideMinusOne).ult(MaxRHS);
+ return (std::move(MaxValue) - std::move(MaxStrideMinusOne)).ult(MaxRHS);
}
bool ScalarEvolution::doesIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride,
@@ -8950,7 +9002,7 @@ bool ScalarEvolution::doesIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride,
.getSignedMax();
// SMinRHS - SMaxStrideMinusOne < SMinValue => overflow!
- return (MinValue + MaxStrideMinusOne).sgt(MinRHS);
+ return (std::move(MinValue) + std::move(MaxStrideMinusOne)).sgt(MinRHS);
}
APInt MinRHS = getUnsignedRange(RHS).getUnsignedMin();
@@ -8959,7 +9011,7 @@ bool ScalarEvolution::doesIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride,
.getUnsignedMax();
// UMinRHS - UMaxStrideMinusOne < UMinValue => overflow!
- return (MinValue + MaxStrideMinusOne).ugt(MinRHS);
+ return (std::move(MinValue) + std::move(MaxStrideMinusOne)).ugt(MinRHS);
}
const SCEV *ScalarEvolution::computeBECount(const SCEV *Delta, const SCEV *Step,
@@ -9250,9 +9302,8 @@ const SCEV *SCEVAddRecExpr::getNumIterationsInRange(const ConstantRange &Range,
// the upper value of the range must be the first possible exit value.
// If A is negative then the lower of the range is the last possible loop
// value. Also note that we already checked for a full range.
- APInt One(BitWidth,1);
APInt A = cast<SCEVConstant>(getOperand(1))->getAPInt();
- APInt End = A.sge(One) ? (Range.getUpper() - One) : Range.getLower();
+ APInt End = A.sge(1) ? (Range.getUpper() - 1) : Range.getLower();
// The exit value should be (End+A)/A.
APInt ExitVal = (End + A).udiv(A);
@@ -9268,7 +9319,7 @@ const SCEV *SCEVAddRecExpr::getNumIterationsInRange(const ConstantRange &Range,
// Ensure that the previous value is in the range. This is a sanity check.
assert(Range.contains(
EvaluateConstantChrecAtConstant(this,
- ConstantInt::get(SE.getContext(), ExitVal - One), SE)->getValue()) &&
+ ConstantInt::get(SE.getContext(), ExitVal - 1), SE)->getValue()) &&
"Linear scev computation is off in a bad way!");
return SE.getConstant(ExitValue);
} else if (isQuadratic()) {
@@ -9574,7 +9625,7 @@ const SCEV *ScalarEvolution::getElementSize(Instruction *Inst) {
void ScalarEvolution::findArrayDimensions(SmallVectorImpl<const SCEV *> &Terms,
SmallVectorImpl<const SCEV *> &Sizes,
- const SCEV *ElementSize) const {
+ const SCEV *ElementSize) {
if (Terms.size() < 1 || !ElementSize)
return;
@@ -9590,7 +9641,7 @@ void ScalarEvolution::findArrayDimensions(SmallVectorImpl<const SCEV *> &Terms,
});
// Remove duplicates.
- std::sort(Terms.begin(), Terms.end());
+ array_pod_sort(Terms.begin(), Terms.end());
Terms.erase(std::unique(Terms.begin(), Terms.end()), Terms.end());
// Put larger terms first.
@@ -9598,13 +9649,11 @@ void ScalarEvolution::findArrayDimensions(SmallVectorImpl<const SCEV *> &Terms,
return numberOfTerms(LHS) > numberOfTerms(RHS);
});
- ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);
-
// Try to divide all terms by the element size. If term is not divisible by
// element size, proceed with the original term.
for (const SCEV *&Term : Terms) {
const SCEV *Q, *R;
- SCEVDivision::divide(SE, Term, ElementSize, &Q, &R);
+ SCEVDivision::divide(*this, Term, ElementSize, &Q, &R);
if (!Q->isZero())
Term = Q;
}
@@ -9613,7 +9662,7 @@ void ScalarEvolution::findArrayDimensions(SmallVectorImpl<const SCEV *> &Terms,
// Remove constant factors.
for (const SCEV *T : Terms)
- if (const SCEV *NewT = removeConstantFactors(SE, T))
+ if (const SCEV *NewT = removeConstantFactors(*this, T))
NewTerms.push_back(NewT);
DEBUG({
@@ -9622,8 +9671,7 @@ void ScalarEvolution::findArrayDimensions(SmallVectorImpl<const SCEV *> &Terms,
dbgs() << *T << "\n";
});
- if (NewTerms.empty() ||
- !findArrayDimensionsRec(SE, NewTerms, Sizes)) {
+ if (NewTerms.empty() || !findArrayDimensionsRec(*this, NewTerms, Sizes)) {
Sizes.clear();
return;
}
diff --git a/contrib/llvm/lib/Analysis/TargetLibraryInfo.cpp b/contrib/llvm/lib/Analysis/TargetLibraryInfo.cpp
index be734fa91425..848e1b4717b5 100644
--- a/contrib/llvm/lib/Analysis/TargetLibraryInfo.cpp
+++ b/contrib/llvm/lib/Analysis/TargetLibraryInfo.cpp
@@ -1176,6 +1176,10 @@ bool TargetLibraryInfoImpl::isValidProtoForLibFunc(const FunctionType &FTy,
FTy.getParamType(0)->isPointerTy() &&
FTy.getParamType(1) == SizeTTy && FTy.getParamType(2) == SizeTTy);
+ case LibFunc_wcslen:
+ return (NumParams == 1 && FTy.getParamType(0)->isPointerTy() &&
+ FTy.getReturnType()->isIntegerTy());
+
case LibFunc::NumLibFuncs:
break;
}
diff --git a/contrib/llvm/lib/Analysis/ValueTracking.cpp b/contrib/llvm/lib/Analysis/ValueTracking.cpp
index 6ec175fc84e2..a7f3ff672aef 100644
--- a/contrib/llvm/lib/Analysis/ValueTracking.cpp
+++ b/contrib/llvm/lib/Analysis/ValueTracking.cpp
@@ -59,8 +59,8 @@ static cl::opt<bool>
DontImproveNonNegativePhiBits("dont-improve-non-negative-phi-bits",
cl::Hidden, cl::init(true));
-/// Returns the bitwidth of the given scalar or pointer type (if unknown returns
-/// 0). For vector types, returns the element type's bitwidth.
+/// Returns the bitwidth of the given scalar or pointer type. For vector types,
+/// returns the element type's bitwidth.
static unsigned getBitWidth(Type *Ty, const DataLayout &DL) {
if (unsigned BitWidth = Ty->getScalarSizeInBits())
return BitWidth;
@@ -342,7 +342,6 @@ static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW,
// Also compute a conservative estimate for high known-0 bits.
// More trickiness is possible, but this is sufficient for the
// interesting case of alignment computation.
- Known.One.clearAllBits();
unsigned TrailZ = Known.Zero.countTrailingOnes() +
Known2.Zero.countTrailingOnes();
unsigned LeadZ = std::max(Known.Zero.countLeadingOnes() +
@@ -351,7 +350,7 @@ static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW,
TrailZ = std::min(TrailZ, BitWidth);
LeadZ = std::min(LeadZ, BitWidth);
- Known.Zero.clearAllBits();
+ Known.resetAll();
Known.Zero.setLowBits(TrailZ);
Known.Zero.setHighBits(LeadZ);
@@ -529,15 +528,13 @@ static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
if (Arg == V && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
assert(BitWidth == 1 && "assume operand is not i1?");
- Known.Zero.clearAllBits();
- Known.One.setAllBits();
+ Known.setAllOnes();
return;
}
if (match(Arg, m_Not(m_Specific(V))) &&
isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
assert(BitWidth == 1 && "assume operand is not i1?");
- Known.Zero.setAllBits();
- Known.One.clearAllBits();
+ Known.setAllZero();
return;
}
@@ -719,7 +716,7 @@ static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
KnownBits RHSKnown(BitWidth);
computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
- if (RHSKnown.One.isAllOnesValue() || RHSKnown.isNonNegative()) {
+ if (RHSKnown.isAllOnes() || RHSKnown.isNonNegative()) {
// We know that the sign bit is zero.
Known.makeNonNegative();
}
@@ -741,7 +738,7 @@ static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
KnownBits RHSKnown(BitWidth);
computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
- if (RHSKnown.Zero.isAllOnesValue() || RHSKnown.isNegative()) {
+ if (RHSKnown.isZero() || RHSKnown.isNegative()) {
// We know that the sign bit is one.
Known.makeNegative();
}
@@ -776,8 +773,7 @@ static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
// behavior, or we might have a bug in the compiler. We can't assert/crash, so
// clear out the known bits, try to warn the user, and hope for the best.
if (Known.Zero.intersects(Known.One)) {
- Known.Zero.clearAllBits();
- Known.One.clearAllBits();
+ Known.resetAll();
if (Q.ORE) {
auto *CxtI = const_cast<Instruction *>(Q.CxtI);
@@ -813,10 +809,8 @@ static void computeKnownBitsFromShiftOperator(
// If there is conflict between Known.Zero and Known.One, this must be an
// overflowing left shift, so the shift result is undefined. Clear Known
// bits so that other code could propagate this undef.
- if ((Known.Zero & Known.One) != 0) {
- Known.Zero.clearAllBits();
- Known.One.clearAllBits();
- }
+ if ((Known.Zero & Known.One) != 0)
+ Known.resetAll();
return;
}
@@ -826,8 +820,7 @@ static void computeKnownBitsFromShiftOperator(
// If the shift amount could be greater than or equal to the bit-width of the LHS, the
// value could be undef, so we don't know anything about it.
if ((~Known.Zero).uge(BitWidth)) {
- Known.Zero.clearAllBits();
- Known.One.clearAllBits();
+ Known.resetAll();
return;
}
@@ -839,8 +832,7 @@ static void computeKnownBitsFromShiftOperator(
// It would be more-clearly correct to use the two temporaries for this
// calculation. Reusing the APInts here to prevent unnecessary allocations.
- Known.Zero.clearAllBits();
- Known.One.clearAllBits();
+ Known.resetAll();
// If we know the shifter operand is nonzero, we can sometimes infer more
// known bits. However this is expensive to compute, so be lazy about it and
@@ -886,10 +878,8 @@ static void computeKnownBitsFromShiftOperator(
// return anything we'd like, but we need to make sure the sets of known bits
// stay disjoint (it should be better for some other code to actually
// propagate the undef than to pick a value here using known bits).
- if (Known.Zero.intersects(Known.One)) {
- Known.Zero.clearAllBits();
- Known.One.clearAllBits();
- }
+ if (Known.Zero.intersects(Known.One))
+ Known.resetAll();
}
static void computeKnownBitsFromOperator(const Operator *I, KnownBits &Known,
@@ -924,7 +914,7 @@ static void computeKnownBitsFromOperator(const Operator *I, KnownBits &Known,
m_Value(Y))) ||
match(I->getOperand(1), m_Add(m_Specific(I->getOperand(0)),
m_Value(Y))))) {
- Known2.Zero.clearAllBits(); Known2.One.clearAllBits();
+ Known2.resetAll();
computeKnownBits(Y, Known2, Depth + 1, Q);
if (Known2.One.countTrailingOnes() > 0)
Known.Zero.setBit(0);
@@ -965,8 +955,7 @@ static void computeKnownBitsFromOperator(const Operator *I, KnownBits &Known,
computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
unsigned LeadZ = Known2.Zero.countLeadingOnes();
- Known2.One.clearAllBits();
- Known2.Zero.clearAllBits();
+ Known2.resetAll();
computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
unsigned RHSUnknownLeadingOnes = Known2.One.countLeadingZeros();
if (RHSUnknownLeadingOnes != BitWidth)
@@ -1051,11 +1040,9 @@ static void computeKnownBitsFromOperator(const Operator *I, KnownBits &Known,
SrcBitWidth = Q.DL.getTypeSizeInBits(SrcTy->getScalarType());
assert(SrcBitWidth && "SrcBitWidth can't be zero");
- Known.Zero = Known.Zero.zextOrTrunc(SrcBitWidth);
- Known.One = Known.One.zextOrTrunc(SrcBitWidth);
+ Known = Known.zextOrTrunc(SrcBitWidth);
computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
- Known.Zero = Known.Zero.zextOrTrunc(BitWidth);
- Known.One = Known.One.zextOrTrunc(BitWidth);
+ Known = Known.zextOrTrunc(BitWidth);
// Any top bits are known to be zero.
if (BitWidth > SrcBitWidth)
Known.Zero.setBitsFrom(SrcBitWidth);
@@ -1076,13 +1063,11 @@ static void computeKnownBitsFromOperator(const Operator *I, KnownBits &Known,
// Compute the bits in the result that are not present in the input.
unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();
- Known.Zero = Known.Zero.trunc(SrcBitWidth);
- Known.One = Known.One.trunc(SrcBitWidth);
+ Known = Known.trunc(SrcBitWidth);
computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
// If the sign bit of the input is known set or clear, then we know the
// top bits of the result.
- Known.Zero = Known.Zero.sext(BitWidth);
- Known.One = Known.One.sext(BitWidth);
+ Known = Known.sext(BitWidth);
break;
}
case Instruction::Shl: {
@@ -1202,8 +1187,7 @@ static void computeKnownBitsFromOperator(const Operator *I, KnownBits &Known,
unsigned Leaders = std::max(Known.Zero.countLeadingOnes(),
Known2.Zero.countLeadingOnes());
- Known.One.clearAllBits();
- Known.Zero.clearAllBits();
+ Known.resetAll();
Known.Zero.setHighBits(Leaders);
break;
}
@@ -1504,8 +1488,7 @@ void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
}
// Null and aggregate-zero are all-zeros.
if (isa<ConstantPointerNull>(V) || isa<ConstantAggregateZero>(V)) {
- Known.One.clearAllBits();
- Known.Zero.setAllBits();
+ Known.setAllZero();
return;
}
// Handle a constant vector by taking the intersection of the known bits of
@@ -1532,8 +1515,7 @@ void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
Constant *Element = CV->getAggregateElement(i);
auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
if (!ElementCI) {
- Known.Zero.clearAllBits();
- Known.One.clearAllBits();
+ Known.resetAll();
return;
}
Elt = ElementCI->getValue();
@@ -1544,7 +1526,7 @@ void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
}
// Start out not knowing anything.
- Known.Zero.clearAllBits(); Known.One.clearAllBits();
+ Known.resetAll();
// We can't imply anything about undefs.
if (isa<UndefValue>(V))
@@ -1590,13 +1572,7 @@ void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
/// Convenience wrapper around computeKnownBits.
void ComputeSignBit(const Value *V, bool &KnownZero, bool &KnownOne,
unsigned Depth, const Query &Q) {
- unsigned BitWidth = getBitWidth(V->getType(), Q.DL);
- if (!BitWidth) {
- KnownZero = false;
- KnownOne = false;
- return;
- }
- KnownBits Bits(BitWidth);
+ KnownBits Bits(getBitWidth(V->getType(), Q.DL));
computeKnownBits(V, Bits, Depth, Q);
KnownOne = Bits.isNegative();
KnownZero = Bits.isNonNegative();
@@ -1847,7 +1823,7 @@ bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q) {
// shl X, Y != 0 if X is odd. Note that the value of the shift is undefined
// if the lowest bit is shifted off the end.
- if (BitWidth && match(V, m_Shl(m_Value(X), m_Value(Y)))) {
+ if (match(V, m_Shl(m_Value(X), m_Value(Y)))) {
// shl nuw can't remove any non-zero bits.
const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
if (BO->hasNoUnsignedWrap())
@@ -1906,7 +1882,7 @@ bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q) {
// If X and Y are both negative (as signed values) then their sum is not
// zero unless both X and Y equal INT_MIN.
- if (BitWidth && XKnownNegative && YKnownNegative) {
+ if (XKnownNegative && YKnownNegative) {
KnownBits Known(BitWidth);
APInt Mask = APInt::getSignedMaxValue(BitWidth);
// The sign bit of X is set. If some other bit is set then X is not equal
@@ -1971,7 +1947,6 @@ bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q) {
return true;
}
- if (!BitWidth) return false;
KnownBits Known(BitWidth);
computeKnownBits(V, Known, Depth, Q);
return Known.One != 0;
diff --git a/contrib/llvm/lib/Bitcode/Reader/BitcodeReader.cpp b/contrib/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
index 8b6f79a81b93..580261a3b5e0 100644
--- a/contrib/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
+++ b/contrib/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
@@ -694,15 +694,16 @@ class ModuleSummaryIndexBitcodeReader : public BitcodeReaderBase {
/// Used to enable on-demand parsing of the VST.
uint64_t VSTOffset = 0;
- // Map to save ValueId to GUID association that was recorded in the
+ // Map to save ValueId to ValueInfo association that was recorded in the
// ValueSymbolTable. It is used after the VST is parsed to convert
// call graph edges read from the function summary from referencing
- // callees by their ValueId to using the GUID instead, which is how
+ // callees by their ValueId to using the ValueInfo instead, which is how
// they are recorded in the summary index being built.
- // We save a second GUID which is the same as the first one, but ignoring the
- // linkage, i.e. for value other than local linkage they are identical.
- DenseMap<unsigned, std::pair<GlobalValue::GUID, GlobalValue::GUID>>
- ValueIdToCallGraphGUIDMap;
+ // We save a GUID which refers to the same global as the ValueInfo, but
+ // ignoring the linkage, i.e. for values other than local linkage they are
+ // identical.
+ DenseMap<unsigned, std::pair<ValueInfo, GlobalValue::GUID>>
+ ValueIdToValueInfoMap;
/// Map populated during module path string table parsing, from the
/// module ID to a string reference owned by the index's module
@@ -742,8 +743,8 @@ private:
Error parseEntireSummary();
Error parseModuleStringTable();
- std::pair<GlobalValue::GUID, GlobalValue::GUID>
- getGUIDFromValueId(unsigned ValueId);
+ std::pair<ValueInfo, GlobalValue::GUID>
+ getValueInfoFromValueId(unsigned ValueId);
ModulePathStringTableTy::iterator addThisModulePath();
};
@@ -4697,11 +4698,11 @@ ModuleSummaryIndexBitcodeReader::addThisModulePath() {
return TheIndex.addModulePath(ModulePath, ModuleId);
}
-std::pair<GlobalValue::GUID, GlobalValue::GUID>
-ModuleSummaryIndexBitcodeReader::getGUIDFromValueId(unsigned ValueId) {
- auto VGI = ValueIdToCallGraphGUIDMap.find(ValueId);
- assert(VGI != ValueIdToCallGraphGUIDMap.end());
- return VGI->second;
+std::pair<ValueInfo, GlobalValue::GUID>
+ModuleSummaryIndexBitcodeReader::getValueInfoFromValueId(unsigned ValueId) {
+ auto VGI = ValueIdToValueInfoMap[ValueId];
+ assert(VGI.first);
+ return VGI;
}
void ModuleSummaryIndexBitcodeReader::setValueGUID(
@@ -4716,8 +4717,8 @@ void ModuleSummaryIndexBitcodeReader::setValueGUID(
if (PrintSummaryGUIDs)
dbgs() << "GUID " << ValueGUID << "(" << OriginalNameID << ") is "
<< ValueName << "\n";
- ValueIdToCallGraphGUIDMap[ValueID] =
- std::make_pair(ValueGUID, OriginalNameID);
+ ValueIdToValueInfoMap[ValueID] =
+ std::make_pair(TheIndex.getOrInsertValueInfo(ValueGUID), OriginalNameID);
}
// Specialized value symbol table parser used when reading module index
@@ -4795,7 +4796,8 @@ Error ModuleSummaryIndexBitcodeReader::parseValueSymbolTable(
GlobalValue::GUID RefGUID = Record[1];
// The "original name", which is the second value of the pair will be
// overriden later by a FS_COMBINED_ORIGINAL_NAME in the combined index.
- ValueIdToCallGraphGUIDMap[ValueID] = std::make_pair(RefGUID, RefGUID);
+ ValueIdToValueInfoMap[ValueID] =
+ std::make_pair(TheIndex.getOrInsertValueInfo(RefGUID), RefGUID);
break;
}
}
@@ -4940,7 +4942,7 @@ ModuleSummaryIndexBitcodeReader::makeRefList(ArrayRef<uint64_t> Record) {
std::vector<ValueInfo> Ret;
Ret.reserve(Record.size());
for (uint64_t RefValueId : Record)
- Ret.push_back(getGUIDFromValueId(RefValueId).first);
+ Ret.push_back(getValueInfoFromValueId(RefValueId).first);
return Ret;
}
@@ -4950,14 +4952,14 @@ std::vector<FunctionSummary::EdgeTy> ModuleSummaryIndexBitcodeReader::makeCallLi
Ret.reserve(Record.size());
for (unsigned I = 0, E = Record.size(); I != E; ++I) {
CalleeInfo::HotnessType Hotness = CalleeInfo::HotnessType::Unknown;
- GlobalValue::GUID CalleeGUID = getGUIDFromValueId(Record[I]).first;
+ ValueInfo Callee = getValueInfoFromValueId(Record[I]).first;
if (IsOldProfileFormat) {
I += 1; // Skip old callsitecount field
if (HasProfile)
I += 1; // Skip old profilecount field
} else if (HasProfile)
Hotness = static_cast<CalleeInfo::HotnessType>(Record[++I]);
- Ret.push_back(FunctionSummary::EdgeTy{CalleeGUID, CalleeInfo{Hotness}});
+ Ret.push_back(FunctionSummary::EdgeTy{Callee, CalleeInfo{Hotness}});
}
return Ret;
}
@@ -5027,7 +5029,8 @@ Error ModuleSummaryIndexBitcodeReader::parseEntireSummary() {
case bitc::FS_VALUE_GUID: { // [valueid, refguid]
uint64_t ValueID = Record[0];
GlobalValue::GUID RefGUID = Record[1];
- ValueIdToCallGraphGUIDMap[ValueID] = std::make_pair(RefGUID, RefGUID);
+ ValueIdToValueInfoMap[ValueID] =
+ std::make_pair(TheIndex.getOrInsertValueInfo(RefGUID), RefGUID);
break;
}
// FS_PERMODULE: [valueid, flags, instcount, numrefs, numrefs x valueid,
@@ -5068,10 +5071,10 @@ Error ModuleSummaryIndexBitcodeReader::parseEntireSummary() {
PendingTypeCheckedLoadVCalls.clear();
PendingTypeTestAssumeConstVCalls.clear();
PendingTypeCheckedLoadConstVCalls.clear();
- auto GUID = getGUIDFromValueId(ValueID);
+ auto VIAndOriginalGUID = getValueInfoFromValueId(ValueID);
FS->setModulePath(addThisModulePath()->first());
- FS->setOriginalName(GUID.second);
- TheIndex.addGlobalValueSummary(GUID.first, std::move(FS));
+ FS->setOriginalName(VIAndOriginalGUID.second);
+ TheIndex.addGlobalValueSummary(VIAndOriginalGUID.first, std::move(FS));
break;
}
// FS_ALIAS: [valueid, flags, valueid]
@@ -5091,14 +5094,15 @@ Error ModuleSummaryIndexBitcodeReader::parseEntireSummary() {
// ownership.
AS->setModulePath(addThisModulePath()->first());
- GlobalValue::GUID AliaseeGUID = getGUIDFromValueId(AliaseeID).first;
+ GlobalValue::GUID AliaseeGUID =
+ getValueInfoFromValueId(AliaseeID).first.getGUID();
auto AliaseeInModule =
TheIndex.findSummaryInModule(AliaseeGUID, ModulePath);
if (!AliaseeInModule)
return error("Alias expects aliasee summary to be parsed");
AS->setAliasee(AliaseeInModule);
- auto GUID = getGUIDFromValueId(ValueID);
+ auto GUID = getValueInfoFromValueId(ValueID);
AS->setOriginalName(GUID.second);
TheIndex.addGlobalValueSummary(GUID.first, std::move(AS));
break;
@@ -5112,7 +5116,7 @@ Error ModuleSummaryIndexBitcodeReader::parseEntireSummary() {
makeRefList(ArrayRef<uint64_t>(Record).slice(2));
auto FS = llvm::make_unique<GlobalVarSummary>(Flags, std::move(Refs));
FS->setModulePath(addThisModulePath()->first());
- auto GUID = getGUIDFromValueId(ValueID);
+ auto GUID = getValueInfoFromValueId(ValueID);
FS->setOriginalName(GUID.second);
TheIndex.addGlobalValueSummary(GUID.first, std::move(FS));
break;
@@ -5139,7 +5143,7 @@ Error ModuleSummaryIndexBitcodeReader::parseEntireSummary() {
std::vector<FunctionSummary::EdgeTy> Edges = makeCallList(
ArrayRef<uint64_t>(Record).slice(CallGraphEdgeStartIndex),
IsOldProfileFormat, HasProfile);
- GlobalValue::GUID GUID = getGUIDFromValueId(ValueID).first;
+ ValueInfo VI = getValueInfoFromValueId(ValueID).first;
auto FS = llvm::make_unique<FunctionSummary>(
Flags, InstCount, std::move(Refs), std::move(Edges),
std::move(PendingTypeTests), std::move(PendingTypeTestAssumeVCalls),
@@ -5152,9 +5156,9 @@ Error ModuleSummaryIndexBitcodeReader::parseEntireSummary() {
PendingTypeTestAssumeConstVCalls.clear();
PendingTypeCheckedLoadConstVCalls.clear();
LastSeenSummary = FS.get();
- LastSeenGUID = GUID;
+ LastSeenGUID = VI.getGUID();
FS->setModulePath(ModuleIdMap[ModuleId]);
- TheIndex.addGlobalValueSummary(GUID, std::move(FS));
+ TheIndex.addGlobalValueSummary(VI, std::move(FS));
break;
}
// FS_COMBINED_ALIAS: [valueid, modid, flags, valueid]
@@ -5170,16 +5174,17 @@ Error ModuleSummaryIndexBitcodeReader::parseEntireSummary() {
LastSeenSummary = AS.get();
AS->setModulePath(ModuleIdMap[ModuleId]);
- auto AliaseeGUID = getGUIDFromValueId(AliaseeValueId).first;
+ auto AliaseeGUID =
+ getValueInfoFromValueId(AliaseeValueId).first.getGUID();
auto AliaseeInModule =
TheIndex.findSummaryInModule(AliaseeGUID, AS->modulePath());
if (!AliaseeInModule)
return error("Alias expects aliasee summary to be parsed");
AS->setAliasee(AliaseeInModule);
- GlobalValue::GUID GUID = getGUIDFromValueId(ValueID).first;
- LastSeenGUID = GUID;
- TheIndex.addGlobalValueSummary(GUID, std::move(AS));
+ ValueInfo VI = getValueInfoFromValueId(ValueID).first;
+ LastSeenGUID = VI.getGUID();
+ TheIndex.addGlobalValueSummary(VI, std::move(AS));
break;
}
// FS_COMBINED_GLOBALVAR_INIT_REFS: [valueid, modid, flags, n x valueid]
@@ -5193,9 +5198,9 @@ Error ModuleSummaryIndexBitcodeReader::parseEntireSummary() {
auto FS = llvm::make_unique<GlobalVarSummary>(Flags, std::move(Refs));
LastSeenSummary = FS.get();
FS->setModulePath(ModuleIdMap[ModuleId]);
- GlobalValue::GUID GUID = getGUIDFromValueId(ValueID).first;
- LastSeenGUID = GUID;
- TheIndex.addGlobalValueSummary(GUID, std::move(FS));
+ ValueInfo VI = getValueInfoFromValueId(ValueID).first;
+ LastSeenGUID = VI.getGUID();
+ TheIndex.addGlobalValueSummary(VI, std::move(FS));
break;
}
// FS_COMBINED_ORIGINAL_NAME: [original_name]
diff --git a/contrib/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp b/contrib/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
index 485d9b6ac0bc..1b8d81a60201 100644
--- a/contrib/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
+++ b/contrib/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
@@ -156,14 +156,14 @@ public:
return;
for (const auto &GUIDSummaryLists : *Index)
// Examine all summaries for this GUID.
- for (auto &Summary : GUIDSummaryLists.second)
+ for (auto &Summary : GUIDSummaryLists.second.SummaryList)
if (auto FS = dyn_cast<FunctionSummary>(Summary.get()))
// For each call in the function summary, see if the call
// is to a GUID (which means it is for an indirect call,
// otherwise we would have a Value for it). If so, synthesize
// a value id.
for (auto &CallEdge : FS->calls())
- if (CallEdge.first.isGUID())
+ if (!CallEdge.first.getValue())
assignValueId(CallEdge.first.getGUID());
}
@@ -304,7 +304,7 @@ private:
}
// Helper to get the valueId for the type of value recorded in VI.
unsigned getValueId(ValueInfo VI) {
- if (VI.isGUID())
+ if (!VI.getValue())
return getValueId(VI.getGUID());
return VE.getValueID(VI.getValue());
}
@@ -358,7 +358,7 @@ public:
Callback(Summary);
} else {
for (auto &Summaries : Index)
- for (auto &Summary : Summaries.second)
+ for (auto &Summary : Summaries.second.SummaryList)
Callback({Summaries.first, Summary.get()});
}
}
@@ -3270,15 +3270,14 @@ void ModuleBitcodeWriter::writePerModuleFunctionSummaryRecord(
void ModuleBitcodeWriter::writeModuleLevelReferences(
const GlobalVariable &V, SmallVector<uint64_t, 64> &NameVals,
unsigned FSModRefsAbbrev) {
- auto Summaries =
- Index->findGlobalValueSummaryList(GlobalValue::getGUID(V.getName()));
- if (Summaries == Index->end()) {
+ auto VI = Index->getValueInfo(GlobalValue::getGUID(V.getName()));
+ if (!VI || VI.getSummaryList().empty()) {
// Only declarations should not have a summary (a declaration might however
// have a summary if the def was in module level asm).
assert(V.isDeclaration());
return;
}
- auto *Summary = Summaries->second.front().get();
+ auto *Summary = VI.getSummaryList()[0].get();
NameVals.push_back(VE.getValueID(&V));
GlobalVarSummary *VS = cast<GlobalVarSummary>(Summary);
NameVals.push_back(getEncodedGVSummaryFlags(VS->flags()));
@@ -3367,15 +3366,14 @@ void ModuleBitcodeWriter::writePerModuleGlobalValueSummary() {
if (!F.hasName())
report_fatal_error("Unexpected anonymous function when writing summary");
- auto Summaries =
- Index->findGlobalValueSummaryList(GlobalValue::getGUID(F.getName()));
- if (Summaries == Index->end()) {
+ ValueInfo VI = Index->getValueInfo(GlobalValue::getGUID(F.getName()));
+ if (!VI || VI.getSummaryList().empty()) {
// Only declarations should not have a summary (a declaration might
// however have a summary if the def was in module level asm).
assert(F.isDeclaration());
continue;
}
- auto *Summary = Summaries->second.front().get();
+ auto *Summary = VI.getSummaryList()[0].get();
writePerModuleFunctionSummaryRecord(NameVals, Summary, VE.getValueID(&F),
FSCallsAbbrev, FSCallsProfileAbbrev, F);
}
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
index b11e30c359b3..7ddb86d80bf0 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
@@ -2761,37 +2761,63 @@ void AsmPrinter::emitXRayTable() {
auto PrevSection = OutStreamer->getCurrentSectionOnly();
auto Fn = MF->getFunction();
- MCSection *Section = nullptr;
+ MCSection *InstMap = nullptr;
+ MCSection *FnSledIndex = nullptr;
if (MF->getSubtarget().getTargetTriple().isOSBinFormatELF()) {
if (Fn->hasComdat()) {
- Section = OutContext.getELFSection("xray_instr_map", ELF::SHT_PROGBITS,
+ InstMap = OutContext.getELFSection("xray_instr_map", ELF::SHT_PROGBITS,
ELF::SHF_ALLOC | ELF::SHF_GROUP, 0,
Fn->getComdat()->getName());
+ FnSledIndex = OutContext.getELFSection("xray_fn_idx", ELF::SHT_PROGBITS,
+ ELF::SHF_ALLOC | ELF::SHF_GROUP, 0,
+ Fn->getComdat()->getName());
} else {
- Section = OutContext.getELFSection("xray_instr_map", ELF::SHT_PROGBITS,
+ InstMap = OutContext.getELFSection("xray_instr_map", ELF::SHT_PROGBITS,
ELF::SHF_ALLOC);
+ FnSledIndex = OutContext.getELFSection("xray_fn_idx", ELF::SHT_PROGBITS,
+ ELF::SHF_ALLOC);
}
} else if (MF->getSubtarget().getTargetTriple().isOSBinFormatMachO()) {
- Section = OutContext.getMachOSection("__DATA", "xray_instr_map", 0,
+ InstMap = OutContext.getMachOSection("__DATA", "xray_instr_map", 0,
SectionKind::getReadOnlyWithRel());
+ FnSledIndex = OutContext.getMachOSection("__DATA", "xray_fn_idx", 0,
+ SectionKind::getReadOnlyWithRel());
} else {
llvm_unreachable("Unsupported target");
}
// Before we switch over, we force a reference to a label inside the
- // xray_instr_map section. Since this function is always called just
- // before the function's end, we assume that this is happening after
- // the last return instruction.
-
+ // xray_instr_map and xray_fn_idx sections. Since this function is always
+ // called just before the function's end, we assume that this is happening
+ // after the last return instruction. We also use the synthetic label in the
+ // xray_instr_map as a delimiter for the range of sleds for this function in
+ // the index.
auto WordSizeBytes = MAI->getCodePointerSize();
- MCSymbol *Tmp = OutContext.createTempSymbol("xray_synthetic_", true);
+ MCSymbol *SledsStart = OutContext.createTempSymbol("xray_synthetic_", true);
+ MCSymbol *IdxRef = OutContext.createTempSymbol("xray_fn_idx_synth_", true);
OutStreamer->EmitCodeAlignment(16);
- OutStreamer->EmitSymbolValue(Tmp, WordSizeBytes, false);
- OutStreamer->SwitchSection(Section);
- OutStreamer->EmitLabel(Tmp);
+ OutStreamer->EmitSymbolValue(SledsStart, WordSizeBytes, false);
+ OutStreamer->EmitSymbolValue(IdxRef, WordSizeBytes, false);
+
+ // Now we switch to the instrumentation map section. Because this is done
+ // per-function, we are able to create an index entry that will represent the
+ // range of sleds associated with a function.
+ OutStreamer->SwitchSection(InstMap);
+ OutStreamer->EmitLabel(SledsStart);
for (const auto &Sled : Sleds)
Sled.emit(WordSizeBytes, OutStreamer.get(), CurrentFnSym);
-
+ MCSymbol *SledsEnd = OutContext.createTempSymbol("xray_synthetic_end", true);
+ OutStreamer->EmitLabel(SledsEnd);
+
+ // We then emit a single entry in the index per function. We use the symbols
+ // that bound the instrumentation map as the range for a specific function.
+ // Each entry here will be 2 * word size aligned, as we're writing down two
+ // pointers. This should work for both 32-bit and 64-bit platforms.
+ OutStreamer->SwitchSection(FnSledIndex);
+ OutStreamer->EmitCodeAlignment(2 * WordSizeBytes);
+ OutStreamer->EmitLabel(IdxRef);
+ OutStreamer->EmitSymbolValue(SledsStart, WordSizeBytes);
+ OutStreamer->EmitSymbolValue(SledsEnd, WordSizeBytes);
OutStreamer->SwitchSection(PrevSection);
Sleds.clear();
}
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp b/contrib/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp
index 786b11618d75..87b45c001de4 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp
@@ -469,7 +469,7 @@ void CodeViewDebug::emitTypeInformation() {
CommentPrefix += ' ';
}
- TypeDatabase TypeDB;
+ TypeDatabase TypeDB(TypeTable.records().size());
CVTypeDumper CVTD(TypeDB);
TypeTable.ForEachRecord([&](TypeIndex Index, ArrayRef<uint8_t> Record) {
if (OS.isVerboseAsm()) {
@@ -1705,10 +1705,12 @@ TypeIndex CodeViewDebug::lowerCompleteTypeClass(const DICompositeType *Ty) {
SizeInBytes, FullName, Ty->getIdentifier());
TypeIndex ClassTI = TypeTable.writeKnownType(CR);
- StringIdRecord SIDR(TypeIndex(0x0), getFullFilepath(Ty->getFile()));
- TypeIndex SIDI = TypeTable.writeKnownType(SIDR);
- UdtSourceLineRecord USLR(ClassTI, SIDI, Ty->getLine());
- TypeTable.writeKnownType(USLR);
+ if (const auto *File = Ty->getFile()) {
+ StringIdRecord SIDR(TypeIndex(0x0), getFullFilepath(File));
+ TypeIndex SIDI = TypeTable.writeKnownType(SIDR);
+ UdtSourceLineRecord USLR(ClassTI, SIDI, Ty->getLine());
+ TypeTable.writeKnownType(USLR);
+ }
addToUDTs(Ty, ClassTI);
diff --git a/contrib/llvm/lib/CodeGen/BranchFolding.cpp b/contrib/llvm/lib/CodeGen/BranchFolding.cpp
index 2d01301402f0..b63d9f4a4351 100644
--- a/contrib/llvm/lib/CodeGen/BranchFolding.cpp
+++ b/contrib/llvm/lib/CodeGen/BranchFolding.cpp
@@ -1850,8 +1850,8 @@ bool BranchFolder::HoistCommonCodeInSuccs(MachineBasicBlock *MBB) {
return false;
bool HasDups = false;
- SmallVector<unsigned, 4> LocalDefs;
- SmallSet<unsigned, 4> LocalDefsSet;
+ SmallVector<unsigned, 4> LocalDefs, LocalKills;
+ SmallSet<unsigned, 4> ActiveDefsSet, AllDefsSet;
MachineBasicBlock::iterator TIB = TBB->begin();
MachineBasicBlock::iterator FIB = FBB->begin();
MachineBasicBlock::iterator TIE = TBB->end();
@@ -1905,7 +1905,7 @@ bool BranchFolder::HoistCommonCodeInSuccs(MachineBasicBlock *MBB) {
IsSafe = false;
break;
}
- } else if (!LocalDefsSet.count(Reg)) {
+ } else if (!ActiveDefsSet.count(Reg)) {
if (Defs.count(Reg)) {
// Use is defined by the instruction at the point of insertion.
IsSafe = false;
@@ -1925,18 +1925,22 @@ bool BranchFolder::HoistCommonCodeInSuccs(MachineBasicBlock *MBB) {
if (!TIB->isSafeToMove(nullptr, DontMoveAcrossStore))
break;
- // Remove kills from LocalDefsSet, these registers had short live ranges.
+ // Remove kills from ActiveDefsSet, these registers had short live ranges.
for (const MachineOperand &MO : TIB->operands()) {
if (!MO.isReg() || !MO.isUse() || !MO.isKill())
continue;
unsigned Reg = MO.getReg();
- if (!Reg || !LocalDefsSet.count(Reg))
+ if (!Reg)
+ continue;
+ if (!AllDefsSet.count(Reg)) {
+ LocalKills.push_back(Reg);
continue;
+ }
if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
- LocalDefsSet.erase(*AI);
+ ActiveDefsSet.erase(*AI);
} else {
- LocalDefsSet.erase(Reg);
+ ActiveDefsSet.erase(Reg);
}
}
@@ -1948,7 +1952,8 @@ bool BranchFolder::HoistCommonCodeInSuccs(MachineBasicBlock *MBB) {
if (!Reg || TargetRegisterInfo::isVirtualRegister(Reg))
continue;
LocalDefs.push_back(Reg);
- addRegAndItsAliases(Reg, TRI, LocalDefsSet);
+ addRegAndItsAliases(Reg, TRI, ActiveDefsSet);
+ addRegAndItsAliases(Reg, TRI, AllDefsSet);
}
HasDups = true;
@@ -1963,17 +1968,22 @@ bool BranchFolder::HoistCommonCodeInSuccs(MachineBasicBlock *MBB) {
FBB->erase(FBB->begin(), FIB);
// Update livein's.
- bool AddedLiveIns = false;
+ bool ChangedLiveIns = false;
for (unsigned i = 0, e = LocalDefs.size(); i != e; ++i) {
unsigned Def = LocalDefs[i];
- if (LocalDefsSet.count(Def)) {
+ if (ActiveDefsSet.count(Def)) {
TBB->addLiveIn(Def);
FBB->addLiveIn(Def);
- AddedLiveIns = true;
+ ChangedLiveIns = true;
}
}
+ for (unsigned K : LocalKills) {
+ TBB->removeLiveIn(K);
+ FBB->removeLiveIn(K);
+ ChangedLiveIns = true;
+ }
- if (AddedLiveIns) {
+ if (ChangedLiveIns) {
TBB->sortUniqueLiveIns();
FBB->sortUniqueLiveIns();
}
diff --git a/contrib/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/contrib/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
index 75be7a55bd2a..811858f136eb 100644
--- a/contrib/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/contrib/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -1108,6 +1108,14 @@ bool IRTranslator::translate(const Constant &C, unsigned Reg) {
default:
return false;
}
+ } else if (auto CV = dyn_cast<ConstantVector>(&C)) {
+ if (CV->getNumOperands() == 1)
+ return translate(*CV->getOperand(0), Reg);
+ SmallVector<unsigned, 4> Ops;
+ for (unsigned i = 0; i < CV->getNumOperands(); ++i) {
+ Ops.push_back(getOrCreateVReg(*CV->getOperand(i)));
+ }
+ EntryBuilder.buildMerge(Reg, Ops);
} else
return false;
@@ -1199,9 +1207,6 @@ bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) {
finishPendingPhis();
- auto &TLI = *MF->getSubtarget().getTargetLowering();
- TLI.finalizeLowering(*MF);
-
// Merge the argument lowering and constants block with its single
// successor, the LLVM-IR entry block. We want the basic block to
// be maximal.
diff --git a/contrib/llvm/lib/CodeGen/GlobalISel/InstructionSelect.cpp b/contrib/llvm/lib/CodeGen/GlobalISel/InstructionSelect.cpp
index cf97c635e79a..a16e14fe2db6 100644
--- a/contrib/llvm/lib/CodeGen/GlobalISel/InstructionSelect.cpp
+++ b/contrib/llvm/lib/CodeGen/GlobalISel/InstructionSelect.cpp
@@ -24,6 +24,7 @@
#include "llvm/IR/Function.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#define DEBUG_TYPE "instruction-select"
@@ -70,8 +71,7 @@ bool InstructionSelect::runOnMachineFunction(MachineFunction &MF) {
// An optimization remark emitter. Used to report failures.
MachineOptimizationRemarkEmitter MORE(MF, /*MBFI=*/nullptr);
- // FIXME: freezeReservedRegs is now done in IRTranslator, but there are many
- // other MF/MFI fields we need to initialize.
+ // FIXME: There are many other MF/MFI fields we need to initialize.
#ifndef NDEBUG
// Check that our input is fully legal: we require the function to have the
@@ -184,6 +184,9 @@ bool InstructionSelect::runOnMachineFunction(MachineFunction &MF) {
return false;
}
+ auto &TLI = *MF.getSubtarget().getTargetLowering();
+ TLI.finalizeLowering(MF);
+
// FIXME: Should we accurately track changes?
return true;
}
diff --git a/contrib/llvm/lib/CodeGen/GlobalISel/Legalizer.cpp b/contrib/llvm/lib/CodeGen/GlobalISel/Legalizer.cpp
index 74ed58e8d049..aec379197dfb 100644
--- a/contrib/llvm/lib/CodeGen/GlobalISel/Legalizer.cpp
+++ b/contrib/llvm/lib/CodeGen/GlobalISel/Legalizer.cpp
@@ -176,8 +176,13 @@ bool Legalizer::runOnMachineFunction(MachineFunction &MF) {
unsigned NumNewInsns = 0;
SmallVector<MachineInstr *, 4> WorkList;
Helper.MIRBuilder.recordInsertions([&](MachineInstr *MI) {
- ++NumNewInsns;
- WorkList.push_back(MI);
+ // Only legalize pre-isel generic instructions.
+ // Legalization process could generate Target specific pseudo
+ // instructions with generic types. Don't record them.
+ if (isPreISelGenericOpcode(MI->getOpcode())) {
+ ++NumNewInsns;
+ WorkList.push_back(MI);
+ }
});
WorkList.push_back(&*MI);
diff --git a/contrib/llvm/lib/CodeGen/GlobalISel/RegBankSelect.cpp b/contrib/llvm/lib/CodeGen/GlobalISel/RegBankSelect.cpp
index f935390a8d1b..7248f50945d0 100644
--- a/contrib/llvm/lib/CodeGen/GlobalISel/RegBankSelect.cpp
+++ b/contrib/llvm/lib/CodeGen/GlobalISel/RegBankSelect.cpp
@@ -213,21 +213,23 @@ uint64_t RegBankSelect::getRepairCost(
return UINT_MAX;
}
-RegisterBankInfo::InstructionMapping &RegBankSelect::findBestMapping(
+const RegisterBankInfo::InstructionMapping &RegBankSelect::findBestMapping(
MachineInstr &MI, RegisterBankInfo::InstructionMappings &PossibleMappings,
SmallVectorImpl<RepairingPlacement> &RepairPts) {
assert(!PossibleMappings.empty() &&
"Do not know how to map this instruction");
- RegisterBankInfo::InstructionMapping *BestMapping = nullptr;
+ const RegisterBankInfo::InstructionMapping *BestMapping = nullptr;
MappingCost Cost = MappingCost::ImpossibleCost();
SmallVector<RepairingPlacement, 4> LocalRepairPts;
- for (RegisterBankInfo::InstructionMapping &CurMapping : PossibleMappings) {
- MappingCost CurCost = computeMapping(MI, CurMapping, LocalRepairPts, &Cost);
+ for (const RegisterBankInfo::InstructionMapping *CurMapping :
+ PossibleMappings) {
+ MappingCost CurCost =
+ computeMapping(MI, *CurMapping, LocalRepairPts, &Cost);
if (CurCost < Cost) {
DEBUG(dbgs() << "New best: " << CurCost << '\n');
Cost = CurCost;
- BestMapping = &CurMapping;
+ BestMapping = CurMapping;
RepairPts.clear();
for (RepairingPlacement &RepairPt : LocalRepairPts)
RepairPts.emplace_back(std::move(RepairPt));
@@ -237,7 +239,7 @@ RegisterBankInfo::InstructionMapping &RegBankSelect::findBestMapping(
// If none of the mapping worked that means they are all impossible.
// Thus, pick the first one and set an impossible repairing point.
// It will trigger the failed isel mode.
- BestMapping = &(*PossibleMappings.begin());
+ BestMapping = *PossibleMappings.begin();
RepairPts.emplace_back(
RepairingPlacement(MI, 0, *TRI, *this, RepairingPlacement::Impossible));
} else
@@ -543,10 +545,10 @@ bool RegBankSelect::assignInstr(MachineInstr &MI) {
// Remember the repairing placement for all the operands.
SmallVector<RepairingPlacement, 4> RepairPts;
- RegisterBankInfo::InstructionMapping BestMapping;
+ const RegisterBankInfo::InstructionMapping *BestMapping;
if (OptMode == RegBankSelect::Mode::Fast) {
- BestMapping = RBI->getInstrMapping(MI);
- MappingCost DefaultCost = computeMapping(MI, BestMapping, RepairPts);
+ BestMapping = &RBI->getInstrMapping(MI);
+ MappingCost DefaultCost = computeMapping(MI, *BestMapping, RepairPts);
(void)DefaultCost;
if (DefaultCost == MappingCost::ImpossibleCost())
return false;
@@ -555,16 +557,16 @@ bool RegBankSelect::assignInstr(MachineInstr &MI) {
RBI->getInstrPossibleMappings(MI);
if (PossibleMappings.empty())
return false;
- BestMapping = std::move(findBestMapping(MI, PossibleMappings, RepairPts));
+ BestMapping = &findBestMapping(MI, PossibleMappings, RepairPts);
}
// Make sure the mapping is valid for MI.
- assert(BestMapping.verify(MI) && "Invalid instruction mapping");
+ assert(BestMapping->verify(MI) && "Invalid instruction mapping");
- DEBUG(dbgs() << "Best Mapping: " << BestMapping << '\n');
+ DEBUG(dbgs() << "Best Mapping: " << *BestMapping << '\n');
// After this call, MI may not be valid anymore.
// Do not use it.
- return applyMapping(MI, BestMapping, RepairPts);
+ return applyMapping(MI, *BestMapping, RepairPts);
}
bool RegBankSelect::runOnMachineFunction(MachineFunction &MF) {
diff --git a/contrib/llvm/lib/CodeGen/GlobalISel/RegisterBankInfo.cpp b/contrib/llvm/lib/CodeGen/GlobalISel/RegisterBankInfo.cpp
index d5ae9a6776a4..a841902feed1 100644
--- a/contrib/llvm/lib/CodeGen/GlobalISel/RegisterBankInfo.cpp
+++ b/contrib/llvm/lib/CodeGen/GlobalISel/RegisterBankInfo.cpp
@@ -45,6 +45,10 @@ STATISTIC(NumOperandsMappingsCreated,
"Number of operands mappings dynamically created");
STATISTIC(NumOperandsMappingsAccessed,
"Number of operands mappings dynamically accessed");
+STATISTIC(NumInstructionMappingsCreated,
+ "Number of instruction mappings dynamically created");
+STATISTIC(NumInstructionMappingsAccessed,
+ "Number of instruction mappings dynamically accessed");
const unsigned RegisterBankInfo::DefaultMappingID = UINT_MAX;
const unsigned RegisterBankInfo::InvalidMappingID = UINT_MAX - 1;
@@ -137,7 +141,7 @@ static bool isCopyLike(const MachineInstr &MI) {
MI.getOpcode() == TargetOpcode::REG_SEQUENCE;
}
-RegisterBankInfo::InstructionMapping
+const RegisterBankInfo::InstructionMapping &
RegisterBankInfo::getInstrMappingImpl(const MachineInstr &MI) const {
// For copies we want to walk over the operands and try to find one
// that has a register bank since the instruction itself will not get
@@ -147,9 +151,6 @@ RegisterBankInfo::getInstrMappingImpl(const MachineInstr &MI) const {
// is important. The rest is not constrained.
unsigned NumOperandsForMapping = IsCopyLike ? 1 : MI.getNumOperands();
- RegisterBankInfo::InstructionMapping Mapping(DefaultMappingID, /*Cost*/ 1,
- /*OperandsMapping*/ nullptr,
- NumOperandsForMapping);
const MachineFunction &MF = *MI.getParent()->getParent();
const TargetSubtargetInfo &STI = MF.getSubtarget();
const TargetRegisterInfo &TRI = *STI.getRegisterInfo();
@@ -190,7 +191,7 @@ RegisterBankInfo::getInstrMappingImpl(const MachineInstr &MI) const {
if (!IsCopyLike)
// MI does not carry enough information to guess the mapping.
- return InstructionMapping();
+ return getInvalidInstructionMapping();
continue;
}
}
@@ -206,11 +207,13 @@ RegisterBankInfo::getInstrMappingImpl(const MachineInstr &MI) const {
if (IsCopyLike && !CompleteMapping)
// No way to deduce the type from what we have.
- return InstructionMapping();
+ return getInvalidInstructionMapping();
assert(CompleteMapping && "Setting an uncomplete mapping");
- Mapping.setOperandsMapping(getOperandsMapping(OperandsMapping));
- return Mapping;
+ return getInstructionMapping(
+ DefaultMappingID, /*Cost*/ 1,
+ /*OperandsMapping*/ getOperandsMapping(OperandsMapping),
+ NumOperandsForMapping);
}
/// Hashing function for PartialMapping.
@@ -320,9 +323,44 @@ const RegisterBankInfo::ValueMapping *RegisterBankInfo::getOperandsMapping(
return getOperandsMapping(OpdsMapping.begin(), OpdsMapping.end());
}
-RegisterBankInfo::InstructionMapping
+static hash_code
+hashInstructionMapping(unsigned ID, unsigned Cost,
+ const RegisterBankInfo::ValueMapping *OperandsMapping,
+ unsigned NumOperands) {
+ return hash_combine(ID, Cost, OperandsMapping, NumOperands);
+}
+
+const RegisterBankInfo::InstructionMapping &
+RegisterBankInfo::getInstructionMappingImpl(
+ bool IsInvalid, unsigned ID, unsigned Cost,
+ const RegisterBankInfo::ValueMapping *OperandsMapping,
+ unsigned NumOperands) const {
+ assert(((IsInvalid && ID == InvalidMappingID && Cost == 0 &&
+ OperandsMapping == nullptr && NumOperands == 0) ||
+ !IsInvalid) &&
+ "Mismatch argument for invalid input");
+ ++NumInstructionMappingsAccessed;
+
+ hash_code Hash =
+ hashInstructionMapping(ID, Cost, OperandsMapping, NumOperands);
+ const auto &It = MapOfInstructionMappings.find(Hash);
+ if (It != MapOfInstructionMappings.end())
+ return *It->second;
+
+ ++NumInstructionMappingsCreated;
+
+ auto &InstrMapping = MapOfInstructionMappings[Hash];
+ if (IsInvalid)
+ InstrMapping = llvm::make_unique<InstructionMapping>();
+ else
+ InstrMapping = llvm::make_unique<InstructionMapping>(
+ ID, Cost, OperandsMapping, NumOperands);
+ return *InstrMapping;
+}
+
+const RegisterBankInfo::InstructionMapping &
RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
- RegisterBankInfo::InstructionMapping Mapping = getInstrMappingImpl(MI);
+ const RegisterBankInfo::InstructionMapping &Mapping = getInstrMappingImpl(MI);
if (Mapping.isValid())
return Mapping;
llvm_unreachable("The target must implement this");
@@ -332,14 +370,14 @@ RegisterBankInfo::InstructionMappings
RegisterBankInfo::getInstrPossibleMappings(const MachineInstr &MI) const {
InstructionMappings PossibleMappings;
// Put the default mapping first.
- PossibleMappings.push_back(getInstrMapping(MI));
+ PossibleMappings.push_back(&getInstrMapping(MI));
// Then the alternative mapping, if any.
InstructionMappings AltMappings = getInstrAlternativeMappings(MI);
- for (InstructionMapping &AltMapping : AltMappings)
- PossibleMappings.emplace_back(std::move(AltMapping));
+ for (const InstructionMapping *AltMapping : AltMappings)
+ PossibleMappings.push_back(AltMapping);
#ifndef NDEBUG
- for (const InstructionMapping &Mapping : PossibleMappings)
- assert(Mapping.verify(MI) && "Mapping is invalid");
+ for (const InstructionMapping *Mapping : PossibleMappings)
+ assert(Mapping->verify(MI) && "Mapping is invalid");
#endif
return PossibleMappings;
}
diff --git a/contrib/llvm/lib/CodeGen/MIRParser/MIParser.cpp b/contrib/llvm/lib/CodeGen/MIRParser/MIParser.cpp
index cac22af32956..1d36ff4e1458 100644
--- a/contrib/llvm/lib/CodeGen/MIRParser/MIParser.cpp
+++ b/contrib/llvm/lib/CodeGen/MIRParser/MIParser.cpp
@@ -12,11 +12,13 @@
//===----------------------------------------------------------------------===//
#include "MIParser.h"
+
#include "MILexer.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/AsmParser/Parser.h"
#include "llvm/AsmParser/SlotMapping.h"
+#include "llvm/CodeGen/MIRPrinter.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
@@ -134,7 +136,8 @@ public:
bool
parseBasicBlockDefinition(DenseMap<unsigned, MachineBasicBlock *> &MBBSlots);
- bool parseBasicBlock(MachineBasicBlock &MBB);
+ bool parseBasicBlock(MachineBasicBlock &MBB,
+ MachineBasicBlock *&AddFalthroughFrom);
bool parseBasicBlockLiveins(MachineBasicBlock &MBB);
bool parseBasicBlockSuccessors(MachineBasicBlock &MBB);
@@ -518,7 +521,8 @@ bool MIParser::parseBasicBlockSuccessors(MachineBasicBlock &MBB) {
return false;
}
-bool MIParser::parseBasicBlock(MachineBasicBlock &MBB) {
+bool MIParser::parseBasicBlock(MachineBasicBlock &MBB,
+ MachineBasicBlock *&AddFalthroughFrom) {
// Skip the definition.
assert(Token.is(MIToken::MachineBasicBlockLabel));
lex();
@@ -538,10 +542,12 @@ bool MIParser::parseBasicBlock(MachineBasicBlock &MBB) {
//
// is equivalent to
// liveins: %edi, %esi
+ bool ExplicitSuccesors = false;
while (true) {
if (Token.is(MIToken::kw_successors)) {
if (parseBasicBlockSuccessors(MBB))
return true;
+ ExplicitSuccesors = true;
} else if (Token.is(MIToken::kw_liveins)) {
if (parseBasicBlockLiveins(MBB))
return true;
@@ -557,10 +563,9 @@ bool MIParser::parseBasicBlock(MachineBasicBlock &MBB) {
// Parse the instructions.
bool IsInBundle = false;
MachineInstr *PrevMI = nullptr;
- while (true) {
- if (Token.is(MIToken::MachineBasicBlockLabel) || Token.is(MIToken::Eof))
- return false;
- else if (consumeIfPresent(MIToken::Newline))
+ while (!Token.is(MIToken::MachineBasicBlockLabel) &&
+ !Token.is(MIToken::Eof)) {
+ if (consumeIfPresent(MIToken::Newline))
continue;
if (consumeIfPresent(MIToken::rbrace)) {
// The first parsing pass should verify that all closing '}' have an
@@ -592,6 +597,22 @@ bool MIParser::parseBasicBlock(MachineBasicBlock &MBB) {
assert(Token.isNewlineOrEOF() && "MI is not fully parsed");
lex();
}
+
+ // Construct successor list by searching for basic block machine operands.
+ if (!ExplicitSuccesors) {
+ SmallVector<MachineBasicBlock*,4> Successors;
+ bool IsFallthrough;
+ guessSuccessors(MBB, Successors, IsFallthrough);
+ for (MachineBasicBlock *Succ : Successors)
+ MBB.addSuccessor(Succ);
+
+ if (IsFallthrough) {
+ AddFalthroughFrom = &MBB;
+ } else {
+ MBB.normalizeSuccProbs();
+ }
+ }
+
return false;
}
@@ -605,11 +626,18 @@ bool MIParser::parseBasicBlocks() {
// The first parsing pass should have verified that this token is a MBB label
// in the 'parseBasicBlockDefinitions' method.
assert(Token.is(MIToken::MachineBasicBlockLabel));
+ MachineBasicBlock *AddFalthroughFrom = nullptr;
do {
MachineBasicBlock *MBB = nullptr;
if (parseMBBReference(MBB))
return true;
- if (parseBasicBlock(*MBB))
+ if (AddFalthroughFrom) {
+ if (!AddFalthroughFrom->isSuccessor(MBB))
+ AddFalthroughFrom->addSuccessor(MBB);
+ AddFalthroughFrom->normalizeSuccProbs();
+ AddFalthroughFrom = nullptr;
+ }
+ if (parseBasicBlock(*MBB, AddFalthroughFrom))
return true;
// The method 'parseBasicBlock' should parse the whole block until the next
// block or the end of file.
diff --git a/contrib/llvm/lib/CodeGen/MIRPrinter.cpp b/contrib/llvm/lib/CodeGen/MIRPrinter.cpp
index d017b21f0a59..6f6a67d81b0f 100644
--- a/contrib/llvm/lib/CodeGen/MIRPrinter.cpp
+++ b/contrib/llvm/lib/CodeGen/MIRPrinter.cpp
@@ -12,7 +12,8 @@
//
//===----------------------------------------------------------------------===//
-#include "MIRPrinter.h"
+#include "llvm/CodeGen/MIRPrinter.h"
+
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/CodeGen/GlobalISel/RegisterBank.h"
@@ -34,6 +35,7 @@
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/Options.h"
#include "llvm/Support/YAMLTraits.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
@@ -42,6 +44,9 @@
using namespace llvm;
+static cl::opt<bool> SimplifyMIR("simplify-mir",
+ cl::desc("Leave out unnecessary information when printing MIR"));
+
namespace {
/// This structure describes how to print out stack object references.
@@ -105,6 +110,9 @@ class MIPrinter {
const DenseMap<const uint32_t *, unsigned> &RegisterMaskIds;
const DenseMap<int, FrameIndexOperand> &StackObjectOperandMapping;
+ bool canPredictBranchProbabilities(const MachineBasicBlock &MBB) const;
+ bool canPredictSuccessors(const MachineBasicBlock &MBB) const;
+
public:
MIPrinter(raw_ostream &OS, ModuleSlotTracker &MST,
const DenseMap<const uint32_t *, unsigned> &RegisterMaskIds,
@@ -454,6 +462,63 @@ void MIRPrinter::initRegisterMaskIds(const MachineFunction &MF) {
RegisterMaskIds.insert(std::make_pair(Mask, I++));
}
+void llvm::guessSuccessors(const MachineBasicBlock &MBB,
+ SmallVectorImpl<MachineBasicBlock*> &Result,
+ bool &IsFallthrough) {
+ SmallPtrSet<MachineBasicBlock*,8> Seen;
+
+ for (const MachineInstr &MI : MBB) {
+ if (MI.isPHI())
+ continue;
+ for (const MachineOperand &MO : MI.operands()) {
+ if (!MO.isMBB())
+ continue;
+ MachineBasicBlock *Succ = MO.getMBB();
+ auto RP = Seen.insert(Succ);
+ if (RP.second)
+ Result.push_back(Succ);
+ }
+ }
+ MachineBasicBlock::const_iterator I = MBB.getLastNonDebugInstr();
+ IsFallthrough = I == MBB.end() || !I->isBarrier();
+}
+
+bool
+MIPrinter::canPredictBranchProbabilities(const MachineBasicBlock &MBB) const {
+ if (MBB.succ_size() <= 1)
+ return true;
+ if (!MBB.hasSuccessorProbabilities())
+ return true;
+
+ SmallVector<BranchProbability,8> Normalized(MBB.Probs.begin(),
+ MBB.Probs.end());
+ BranchProbability::normalizeProbabilities(Normalized.begin(),
+ Normalized.end());
+ SmallVector<BranchProbability,8> Equal(Normalized.size());
+ BranchProbability::normalizeProbabilities(Equal.begin(), Equal.end());
+
+ return std::equal(Normalized.begin(), Normalized.end(), Equal.begin());
+}
+
+bool MIPrinter::canPredictSuccessors(const MachineBasicBlock &MBB) const {
+ SmallVector<MachineBasicBlock*,8> GuessedSuccs;
+ bool GuessedFallthrough;
+ guessSuccessors(MBB, GuessedSuccs, GuessedFallthrough);
+ if (GuessedFallthrough) {
+ const MachineFunction &MF = *MBB.getParent();
+ MachineFunction::const_iterator NextI = std::next(MBB.getIterator());
+ if (NextI != MF.end()) {
+ MachineBasicBlock *Next = const_cast<MachineBasicBlock*>(&*NextI);
+ if (!is_contained(GuessedSuccs, Next))
+ GuessedSuccs.push_back(Next);
+ }
+ }
+ if (GuessedSuccs.size() != MBB.succ_size())
+ return false;
+ return std::equal(MBB.succ_begin(), MBB.succ_end(), GuessedSuccs.begin());
+}
+
+
void MIPrinter::print(const MachineBasicBlock &MBB) {
assert(MBB.getNumber() >= 0 && "Invalid MBB number");
OS << "bb." << MBB.getNumber();
@@ -492,13 +557,15 @@ void MIPrinter::print(const MachineBasicBlock &MBB) {
bool HasLineAttributes = false;
// Print the successors
- if (!MBB.succ_empty()) {
+ bool canPredictProbs = canPredictBranchProbabilities(MBB);
+ if (!MBB.succ_empty() && (!SimplifyMIR || !canPredictProbs ||
+ !canPredictSuccessors(MBB))) {
OS.indent(2) << "successors: ";
for (auto I = MBB.succ_begin(), E = MBB.succ_end(); I != E; ++I) {
if (I != MBB.succ_begin())
OS << ", ";
printMBBReference(**I);
- if (MBB.hasSuccessorProbabilities())
+ if (!SimplifyMIR || !canPredictProbs)
OS << '('
<< format("0x%08" PRIx32, MBB.getSuccProbability(I).getNumerator())
<< ')';
diff --git a/contrib/llvm/lib/CodeGen/MIRPrintingPass.cpp b/contrib/llvm/lib/CodeGen/MIRPrintingPass.cpp
index c690bcfad567..671cf1eddc2d 100644
--- a/contrib/llvm/lib/CodeGen/MIRPrintingPass.cpp
+++ b/contrib/llvm/lib/CodeGen/MIRPrintingPass.cpp
@@ -12,7 +12,8 @@
//
//===----------------------------------------------------------------------===//
-#include "MIRPrinter.h"
+#include "llvm/CodeGen/MIRPrinter.h"
+
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MIRYamlMapping.h"
diff --git a/contrib/llvm/lib/CodeGen/MachineFrameInfo.cpp b/contrib/llvm/lib/CodeGen/MachineFrameInfo.cpp
index 7de8434df806..73d778ff3023 100644
--- a/contrib/llvm/lib/CodeGen/MachineFrameInfo.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineFrameInfo.cpp
@@ -19,6 +19,7 @@
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetFrameLowering.h"
+#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <cassert>
@@ -175,6 +176,31 @@ unsigned MachineFrameInfo::estimateStackSize(const MachineFunction &MF) const {
return (unsigned)Offset;
}
+void MachineFrameInfo::computeMaxCallFrameSize(const MachineFunction &MF) {
+ const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
+ unsigned FrameSetupOpcode = TII.getCallFrameSetupOpcode();
+ unsigned FrameDestroyOpcode = TII.getCallFrameDestroyOpcode();
+ assert(FrameSetupOpcode != ~0u && FrameDestroyOpcode != ~0u &&
+ "Can only compute MaxCallFrameSize if Setup/Destroy opcode are known");
+
+ MaxCallFrameSize = 0;
+ for (const MachineBasicBlock &MBB : MF) {
+ for (const MachineInstr &MI : MBB) {
+ unsigned Opcode = MI.getOpcode();
+ if (Opcode == FrameSetupOpcode || Opcode == FrameDestroyOpcode) {
+ unsigned Size = TII.getFrameSize(MI);
+ MaxCallFrameSize = std::max(MaxCallFrameSize, Size);
+ AdjustsStack = true;
+ } else if (MI.isInlineAsm()) {
+ // Some inline asm's need a stack frame, as indicated by operand 1.
+ unsigned ExtraInfo = MI.getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
+ if (ExtraInfo & InlineAsm::Extra_IsAlignStack)
+ AdjustsStack = true;
+ }
+ }
+ }
+}
+
void MachineFrameInfo::print(const MachineFunction &MF, raw_ostream &OS) const{
if (Objects.empty()) return;
diff --git a/contrib/llvm/lib/CodeGen/MachineVerifier.cpp b/contrib/llvm/lib/CodeGen/MachineVerifier.cpp
index 84bd670105e1..bfb2cde030dc 100644
--- a/contrib/llvm/lib/CodeGen/MachineVerifier.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineVerifier.cpp
@@ -188,8 +188,9 @@ namespace {
return Reg < regsReserved.size() && regsReserved.test(Reg);
}
- bool isAllocatable(unsigned Reg) {
- return Reg < TRI->getNumRegs() && MRI->isAllocatable(Reg);
+ bool isAllocatable(unsigned Reg) const {
+ return Reg < TRI->getNumRegs() && TRI->isInAllocatableClass(Reg) &&
+ !regsReserved.test(Reg);
}
// Analysis information if available
@@ -526,7 +527,8 @@ void MachineVerifier::markReachable(const MachineBasicBlock *MBB) {
void MachineVerifier::visitMachineFunctionBefore() {
lastIndex = SlotIndex();
- regsReserved = MRI->getReservedRegs();
+ regsReserved = MRI->reservedRegsFrozen() ? MRI->getReservedRegs()
+ : TRI->getReservedRegs(*MF);
if (!MF->empty())
markReachable(&MF->front());
diff --git a/contrib/llvm/lib/CodeGen/PrologEpilogInserter.cpp b/contrib/llvm/lib/CodeGen/PrologEpilogInserter.cpp
index 549f07ecd9ce..d2afeae9e70b 100644
--- a/contrib/llvm/lib/CodeGen/PrologEpilogInserter.cpp
+++ b/contrib/llvm/lib/CodeGen/PrologEpilogInserter.cpp
@@ -277,6 +277,9 @@ void PEI::calculateCallFrameInfo(MachineFunction &Fn) {
AdjustsStack = true;
}
+ assert(!MFI.isMaxCallFrameSizeComputed() ||
+ (MFI.getMaxCallFrameSize() == MaxCallFrameSize &&
+ MFI.adjustsStack() == AdjustsStack));
MFI.setAdjustsStack(AdjustsStack);
MFI.setMaxCallFrameSize(MaxCallFrameSize);
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 03698ac862af..c77046fdfaf5 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -6688,6 +6688,9 @@ SDValue DAGCombiner::visitVSELECT(SDNode *N) {
if (isAbs) {
EVT VT = LHS.getValueType();
+ if (TLI.isOperationLegalOrCustom(ISD::ABS, VT))
+ return DAG.getNode(ISD::ABS, DL, VT, LHS);
+
SDValue Shift = DAG.getNode(
ISD::SRA, DL, VT, LHS,
DAG.getConstant(VT.getScalarSizeInBits() - 1, DL, VT));
@@ -9469,6 +9472,14 @@ SDValue DAGCombiner::visitFMULForFMADistributiveCombine(SDNode *N) {
return SDValue();
}
+static bool isFMulNegTwo(SDValue &N) {
+ if (N.getOpcode() != ISD::FMUL)
+ return false;
+ if (ConstantFPSDNode *CFP = isConstOrConstSplatFP(N.getOperand(1)))
+ return CFP->isExactlyValue(-2.0);
+ return false;
+}
+
SDValue DAGCombiner::visitFADD(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
@@ -9507,6 +9518,16 @@ SDValue DAGCombiner::visitFADD(SDNode *N) {
return DAG.getNode(ISD::FSUB, DL, VT, N1,
GetNegatedExpression(N0, DAG, LegalOperations), Flags);
+ // fold (fadd A, (fmul B, -2.0)) -> (fsub A, (fadd B, B))
+ // fold (fadd (fmul B, -2.0), A) -> (fsub A, (fadd B, B))
+ if ((isFMulNegTwo(N0) && N0.hasOneUse()) ||
+ (isFMulNegTwo(N1) && N1.hasOneUse())) {
+ bool N1IsFMul = isFMulNegTwo(N1);
+ SDValue AddOp = N1IsFMul ? N1.getOperand(0) : N0.getOperand(0);
+ SDValue Add = DAG.getNode(ISD::FADD, DL, VT, AddOp, AddOp, Flags);
+ return DAG.getNode(ISD::FSUB, DL, VT, N1IsFMul ? N0 : N1, Add, Flags);
+ }
+
// FIXME: Auto-upgrade the target/function-level option.
if (Options.NoSignedZerosFPMath || N->getFlags().hasNoSignedZeros()) {
// fold (fadd A, 0) -> A
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
index 6fb26fc3b73d..8c98e3740f6d 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
@@ -861,6 +861,25 @@ bool FastISel::selectPatchpoint(const CallInst *I) {
return true;
}
+bool FastISel::selectXRayCustomEvent(const CallInst *I) {
+ const auto &Triple = TM.getTargetTriple();
+ if (Triple.getArch() != Triple::x86_64 || !Triple.isOSLinux())
+ return true; // don't do anything to this instruction.
+ SmallVector<MachineOperand, 8> Ops;
+ Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(0)),
+ /*IsDef=*/false));
+ Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(1)),
+ /*IsDef=*/false));
+ MachineInstrBuilder MIB =
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::PATCHABLE_EVENT_CALL));
+ for (auto &MO : Ops)
+ MIB.add(MO);
+ // Insert the Patchable Event Call instruction, that gets lowered properly.
+ return true;
+}
+
+
/// Returns an AttributeList representing the attributes applied to the return
/// value of the given call.
static AttributeList getReturnAttrs(FastISel::CallLoweringInfo &CLI) {
@@ -1252,6 +1271,9 @@ bool FastISel::selectIntrinsicCall(const IntrinsicInst *II) {
case Intrinsic::experimental_patchpoint_void:
case Intrinsic::experimental_patchpoint_i64:
return selectPatchpoint(II);
+
+ case Intrinsic::xray_customevent:
+ return selectXRayCustomEvent(II);
}
return fastLowerIntrinsicCall(II);
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
index a0135dc40b87..cdf4d3a8b4e5 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
@@ -402,8 +402,7 @@ FunctionLoweringInfo::GetLiveOutRegInfo(unsigned Reg, unsigned BitWidth) {
if (BitWidth > LOI->Known.getBitWidth()) {
LOI->NumSignBits = 1;
- LOI->Known.Zero = LOI->Known.Zero.zextOrTrunc(BitWidth);
- LOI->Known.One = LOI->Known.One.zextOrTrunc(BitWidth);
+ LOI->Known = LOI->Known.zextOrTrunc(BitWidth);
}
return LOI;
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/ResourcePriorityQueue.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/ResourcePriorityQueue.cpp
index a1d70ab6f036..a21b4c733254 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/ResourcePriorityQueue.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/ResourcePriorityQueue.cpp
@@ -67,12 +67,11 @@ ResourcePriorityQueue::ResourcePriorityQueue(SelectionDAGISel *IS)
unsigned
ResourcePriorityQueue::numberRCValPredInSU(SUnit *SU, unsigned RCId) {
unsigned NumberDeps = 0;
- for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
- I != E; ++I) {
- if (I->isCtrl())
+ for (SDep &Pred : SU->Preds) {
+ if (Pred.isCtrl())
continue;
- SUnit *PredSU = I->getSUnit();
+ SUnit *PredSU = Pred.getSUnit();
const SDNode *ScegN = PredSU->getNode();
if (!ScegN)
@@ -105,12 +104,11 @@ ResourcePriorityQueue::numberRCValPredInSU(SUnit *SU, unsigned RCId) {
unsigned ResourcePriorityQueue::numberRCValSuccInSU(SUnit *SU,
unsigned RCId) {
unsigned NumberDeps = 0;
- for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
- I != E; ++I) {
- if (I->isCtrl())
+ for (const SDep &Succ : SU->Succs) {
+ if (Succ.isCtrl())
continue;
- SUnit *SuccSU = I->getSUnit();
+ SUnit *SuccSU = Succ.getSUnit();
const SDNode *ScegN = SuccSU->getNode();
if (!ScegN)
continue;
@@ -142,9 +140,8 @@ unsigned ResourcePriorityQueue::numberRCValSuccInSU(SUnit *SU,
static unsigned numberCtrlDepsInSU(SUnit *SU) {
unsigned NumberDeps = 0;
- for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
- I != E; ++I)
- if (I->isCtrl())
+ for (const SDep &Succ : SU->Succs)
+ if (Succ.isCtrl())
NumberDeps++;
return NumberDeps;
@@ -152,9 +149,8 @@ static unsigned numberCtrlDepsInSU(SUnit *SU) {
static unsigned numberCtrlPredInSU(SUnit *SU) {
unsigned NumberDeps = 0;
- for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
- I != E; ++I)
- if (I->isCtrl())
+ for (SDep &Pred : SU->Preds)
+ if (Pred.isCtrl())
NumberDeps++;
return NumberDeps;
@@ -212,15 +208,14 @@ bool resource_sort::operator()(const SUnit *LHS, const SUnit *RHS) const {
/// of SU, return it, otherwise return null.
SUnit *ResourcePriorityQueue::getSingleUnscheduledPred(SUnit *SU) {
SUnit *OnlyAvailablePred = nullptr;
- for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
- I != E; ++I) {
- SUnit &Pred = *I->getSUnit();
- if (!Pred.isScheduled) {
+ for (const SDep &Pred : SU->Preds) {
+ SUnit &PredSU = *Pred.getSUnit();
+ if (!PredSU.isScheduled) {
// We found an available, but not scheduled, predecessor. If it's the
// only one we have found, keep track of it... otherwise give up.
- if (OnlyAvailablePred && OnlyAvailablePred != &Pred)
+ if (OnlyAvailablePred && OnlyAvailablePred != &PredSU)
return nullptr;
- OnlyAvailablePred = &Pred;
+ OnlyAvailablePred = &PredSU;
}
}
return OnlyAvailablePred;
@@ -230,9 +225,8 @@ void ResourcePriorityQueue::push(SUnit *SU) {
// Look at all of the successors of this node. Count the number of nodes that
// this node is the sole unscheduled node for.
unsigned NumNodesBlocking = 0;
- for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
- I != E; ++I)
- if (getSingleUnscheduledPred(I->getSUnit()) == SU)
+ for (const SDep &Succ : SU->Succs)
+ if (getSingleUnscheduledPred(Succ.getSUnit()) == SU)
++NumNodesBlocking;
NumNodesSolelyBlocking[SU->NodeNum] = NumNodesBlocking;
@@ -269,14 +263,13 @@ bool ResourcePriorityQueue::isResourceAvailable(SUnit *SU) {
// Now see if there are no other dependencies
// to instructions already in the packet.
for (unsigned i = 0, e = Packet.size(); i != e; ++i)
- for (SUnit::const_succ_iterator I = Packet[i]->Succs.begin(),
- E = Packet[i]->Succs.end(); I != E; ++I) {
+ for (const SDep &Succ : Packet[i]->Succs) {
// Since we do not add pseudos to packets, might as well
// ignore order deps.
- if (I->isCtrl())
+ if (Succ.isCtrl())
continue;
- if (I->getSUnit() == SU)
+ if (Succ.getSUnit() == SU)
return false;
}
@@ -499,11 +492,10 @@ void ResourcePriorityQueue::scheduledNode(SUnit *SU) {
}
}
}
- for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
- I != E; ++I) {
- if (I->isCtrl() || (I->getSUnit()->NumRegDefsLeft == 0))
+ for (SDep &Pred : SU->Preds) {
+ if (Pred.isCtrl() || (Pred.getSUnit()->NumRegDefsLeft == 0))
continue;
- --I->getSUnit()->NumRegDefsLeft;
+ --Pred.getSUnit()->NumRegDefsLeft;
}
}
@@ -515,10 +507,9 @@ void ResourcePriorityQueue::scheduledNode(SUnit *SU) {
// number of live ranges. All others, increase it.
unsigned NumberNonControlDeps = 0;
- for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
- I != E; ++I) {
- adjustPriorityOfUnscheduledPreds(I->getSUnit());
- if (!I->isCtrl())
+ for (const SDep &Succ : SU->Succs) {
+ adjustPriorityOfUnscheduledPreds(Succ.getSUnit());
+ if (!Succ.isCtrl())
NumberNonControlDeps++;
}
@@ -595,8 +586,7 @@ SUnit *ResourcePriorityQueue::pop() {
std::vector<SUnit *>::iterator Best = Queue.begin();
if (!DisableDFASched) {
int BestCost = SUSchedulingCost(*Best);
- for (std::vector<SUnit *>::iterator I = std::next(Queue.begin()),
- E = Queue.end(); I != E; ++I) {
+ for (auto I = std::next(Queue.begin()), E = Queue.end(); I != E; ++I) {
if (SUSchedulingCost(*I) > BestCost) {
BestCost = SUSchedulingCost(*I);
@@ -606,8 +596,7 @@ SUnit *ResourcePriorityQueue::pop() {
}
// Use default TD scheduling mechanism.
else {
- for (std::vector<SUnit *>::iterator I = std::next(Queue.begin()),
- E = Queue.end(); I != E; ++I)
+ for (auto I = std::next(Queue.begin()), E = Queue.end(); I != E; ++I)
if (Picker(*Best, *I))
Best = I;
}
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp
index 62e7733ecd2b..d80a281279b6 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp
@@ -160,18 +160,17 @@ void ScheduleDAGFast::ReleasePred(SUnit *SU, SDep *PredEdge) {
void ScheduleDAGFast::ReleasePredecessors(SUnit *SU, unsigned CurCycle) {
// Bottom up: release predecessors
- for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
- I != E; ++I) {
- ReleasePred(SU, &*I);
- if (I->isAssignedRegDep()) {
+ for (SDep &Pred : SU->Preds) {
+ ReleasePred(SU, &Pred);
+ if (Pred.isAssignedRegDep()) {
// This is a physical register dependency and it's impossible or
// expensive to copy the register. Make sure nothing that can
// clobber the register is scheduled between the predecessor and
// this node.
- if (!LiveRegDefs[I->getReg()]) {
+ if (!LiveRegDefs[Pred.getReg()]) {
++NumLiveRegs;
- LiveRegDefs[I->getReg()] = I->getSUnit();
- LiveRegCycles[I->getReg()] = CurCycle;
+ LiveRegDefs[Pred.getReg()] = Pred.getSUnit();
+ LiveRegCycles[Pred.getReg()] = CurCycle;
}
}
}
@@ -191,16 +190,15 @@ void ScheduleDAGFast::ScheduleNodeBottomUp(SUnit *SU, unsigned CurCycle) {
ReleasePredecessors(SU, CurCycle);
// Release all the implicit physical register defs that are live.
- for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
- I != E; ++I) {
- if (I->isAssignedRegDep()) {
- if (LiveRegCycles[I->getReg()] == I->getSUnit()->getHeight()) {
+ for (SDep &Succ : SU->Succs) {
+ if (Succ.isAssignedRegDep()) {
+ if (LiveRegCycles[Succ.getReg()] == Succ.getSUnit()->getHeight()) {
assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
- assert(LiveRegDefs[I->getReg()] == SU &&
+ assert(LiveRegDefs[Succ.getReg()] == SU &&
"Physical register dependency violated?");
--NumLiveRegs;
- LiveRegDefs[I->getReg()] = nullptr;
- LiveRegCycles[I->getReg()] = 0;
+ LiveRegDefs[Succ.getReg()] = nullptr;
+ LiveRegCycles[Succ.getReg()] = 0;
}
}
}
@@ -282,22 +280,20 @@ SUnit *ScheduleDAGFast::CopyAndMoveSuccessors(SUnit *SU) {
SmallVector<SDep, 4> LoadPreds;
SmallVector<SDep, 4> NodePreds;
SmallVector<SDep, 4> NodeSuccs;
- for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
- I != E; ++I) {
- if (I->isCtrl())
- ChainPred = *I;
- else if (I->getSUnit()->getNode() &&
- I->getSUnit()->getNode()->isOperandOf(LoadNode))
- LoadPreds.push_back(*I);
+ for (SDep &Pred : SU->Preds) {
+ if (Pred.isCtrl())
+ ChainPred = Pred;
+ else if (Pred.getSUnit()->getNode() &&
+ Pred.getSUnit()->getNode()->isOperandOf(LoadNode))
+ LoadPreds.push_back(Pred);
else
- NodePreds.push_back(*I);
+ NodePreds.push_back(Pred);
}
- for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
- I != E; ++I) {
- if (I->isCtrl())
- ChainSuccs.push_back(*I);
+ for (SDep &Succ : SU->Succs) {
+ if (Succ.isCtrl())
+ ChainSuccs.push_back(Succ);
else
- NodeSuccs.push_back(*I);
+ NodeSuccs.push_back(Succ);
}
if (ChainPred.getSUnit()) {
@@ -354,21 +350,19 @@ SUnit *ScheduleDAGFast::CopyAndMoveSuccessors(SUnit *SU) {
NewSU = Clone(SU);
// New SUnit has the exact same predecessors.
- for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
- I != E; ++I)
- if (!I->isArtificial())
- AddPred(NewSU, *I);
+ for (SDep &Pred : SU->Preds)
+ if (!Pred.isArtificial())
+ AddPred(NewSU, Pred);
// Only copy scheduled successors. Cut them from old node's successor
// list and move them over.
SmallVector<std::pair<SUnit *, SDep>, 4> DelDeps;
- for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
- I != E; ++I) {
- if (I->isArtificial())
+ for (SDep &Succ : SU->Succs) {
+ if (Succ.isArtificial())
continue;
- SUnit *SuccSU = I->getSUnit();
+ SUnit *SuccSU = Succ.getSUnit();
if (SuccSU->isScheduled) {
- SDep D = *I;
+ SDep D = Succ;
D.setSUnit(NewSU);
AddPred(SuccSU, D);
D.setSUnit(SU);
@@ -399,16 +393,15 @@ void ScheduleDAGFast::InsertCopiesAndMoveSuccs(SUnit *SU, unsigned Reg,
// Only copy scheduled successors. Cut them from old node's successor
// list and move them over.
SmallVector<std::pair<SUnit *, SDep>, 4> DelDeps;
- for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
- I != E; ++I) {
- if (I->isArtificial())
+ for (SDep &Succ : SU->Succs) {
+ if (Succ.isArtificial())
continue;
- SUnit *SuccSU = I->getSUnit();
+ SUnit *SuccSU = Succ.getSUnit();
if (SuccSU->isScheduled) {
- SDep D = *I;
+ SDep D = Succ;
D.setSUnit(CopyToSU);
AddPred(SuccSU, D);
- DelDeps.push_back(std::make_pair(SuccSU, *I));
+ DelDeps.push_back(std::make_pair(SuccSU, Succ));
}
}
for (unsigned i = 0, e = DelDeps.size(); i != e; ++i) {
@@ -479,10 +472,9 @@ bool ScheduleDAGFast::DelayForLiveRegsBottomUp(SUnit *SU,
SmallSet<unsigned, 4> RegAdded;
// If this node would clobber any "live" register, then it's not ready.
- for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
- I != E; ++I) {
- if (I->isAssignedRegDep()) {
- CheckForLiveRegDef(I->getSUnit(), I->getReg(), LiveRegDefs,
+ for (SDep &Pred : SU->Preds) {
+ if (Pred.isAssignedRegDep()) {
+ CheckForLiveRegDef(Pred.getSUnit(), Pred.getReg(), LiveRegDefs,
RegAdded, LRegs, TRI);
}
}
@@ -755,9 +747,8 @@ void ScheduleDAGLinearize::Schedule() {
// Glue user must be scheduled together with the glue operand. So other
// users of the glue operand must be treated as its users.
SDNode *ImmGUser = Glue->getGluedUser();
- for (SDNode::use_iterator ui = Glue->use_begin(), ue = Glue->use_end();
- ui != ue; ++ui)
- if (*ui == ImmGUser)
+ for (const SDNode *U : Glue->uses())
+ if (U == ImmGUser)
--Degree;
GUser->setNodeId(UDegree + Degree);
Glue->setNodeId(1);
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
index 69b76fbe57d2..4f4025d8ae6a 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
@@ -520,21 +520,20 @@ FindCallSeqStart(SDNode *N, unsigned &NestLevel, unsigned &MaxNest,
/// interference on flags.
void ScheduleDAGRRList::ReleasePredecessors(SUnit *SU) {
// Bottom up: release predecessors
- for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
- I != E; ++I) {
- ReleasePred(SU, &*I);
- if (I->isAssignedRegDep()) {
+ for (SDep &Pred : SU->Preds) {
+ ReleasePred(SU, &Pred);
+ if (Pred.isAssignedRegDep()) {
// This is a physical register dependency and it's impossible or
// expensive to copy the register. Make sure nothing that can
// clobber the register is scheduled between the predecessor and
// this node.
- SUnit *RegDef = LiveRegDefs[I->getReg()]; (void)RegDef;
- assert((!RegDef || RegDef == SU || RegDef == I->getSUnit()) &&
+ SUnit *RegDef = LiveRegDefs[Pred.getReg()]; (void)RegDef;
+ assert((!RegDef || RegDef == SU || RegDef == Pred.getSUnit()) &&
"interference on register dependence");
- LiveRegDefs[I->getReg()] = I->getSUnit();
- if (!LiveRegGens[I->getReg()]) {
+ LiveRegDefs[Pred.getReg()] = Pred.getSUnit();
+ if (!LiveRegGens[Pred.getReg()]) {
++NumLiveRegs;
- LiveRegGens[I->getReg()] = SU;
+ LiveRegGens[Pred.getReg()] = SU;
}
}
}
@@ -733,15 +732,14 @@ void ScheduleDAGRRList::ScheduleNodeBottomUp(SUnit *SU) {
ReleasePredecessors(SU);
// Release all the implicit physical register defs that are live.
- for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
- I != E; ++I) {
- // LiveRegDegs[I->getReg()] != SU when SU is a two-address node.
- if (I->isAssignedRegDep() && LiveRegDefs[I->getReg()] == SU) {
+ for (SDep &Succ : SU->Succs) {
+ // LiveRegDegs[Succ.getReg()] != SU when SU is a two-address node.
+ if (Succ.isAssignedRegDep() && LiveRegDefs[Succ.getReg()] == SU) {
assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
--NumLiveRegs;
- LiveRegDefs[I->getReg()] = nullptr;
- LiveRegGens[I->getReg()] = nullptr;
- releaseInterferences(I->getReg());
+ LiveRegDefs[Succ.getReg()] = nullptr;
+ LiveRegGens[Succ.getReg()] = nullptr;
+ releaseInterferences(Succ.getReg());
}
}
// Release the special call resource dependence, if this is the beginning
@@ -802,17 +800,16 @@ void ScheduleDAGRRList::UnscheduleNodeBottomUp(SUnit *SU) {
DEBUG(dbgs() << "*** Unscheduling [" << SU->getHeight() << "]: ");
DEBUG(SU->dump(this));
- for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
- I != E; ++I) {
- CapturePred(&*I);
- if (I->isAssignedRegDep() && SU == LiveRegGens[I->getReg()]){
+ for (SDep &Pred : SU->Preds) {
+ CapturePred(&Pred);
+ if (Pred.isAssignedRegDep() && SU == LiveRegGens[Pred.getReg()]){
assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
- assert(LiveRegDefs[I->getReg()] == I->getSUnit() &&
+ assert(LiveRegDefs[Pred.getReg()] == Pred.getSUnit() &&
"Physical register dependency violated?");
--NumLiveRegs;
- LiveRegDefs[I->getReg()] = nullptr;
- LiveRegGens[I->getReg()] = nullptr;
- releaseInterferences(I->getReg());
+ LiveRegDefs[Pred.getReg()] = nullptr;
+ LiveRegGens[Pred.getReg()] = nullptr;
+ releaseInterferences(Pred.getReg());
}
}
@@ -895,7 +892,7 @@ void ScheduleDAGRRList::RestoreHazardCheckerBottomUp() {
std::vector<SUnit*>::const_iterator I = (Sequence.end() - LookAhead);
unsigned HazardCycle = (*I)->getHeight();
- for (std::vector<SUnit*>::const_iterator E = Sequence.end(); I != E; ++I) {
+ for (auto E = Sequence.end(); I != E; ++I) {
SUnit *SU = *I;
for (; SU->getHeight() > HazardCycle; ++HazardCycle) {
HazardRec->RecedeCycle();
@@ -1261,10 +1258,9 @@ DelayForLiveRegsBottomUp(SUnit *SU, SmallVectorImpl<unsigned> &LRegs) {
//
// If SU is the currently live definition of the same register that it uses,
// then we are free to schedule it.
- for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
- I != E; ++I) {
- if (I->isAssignedRegDep() && LiveRegDefs[I->getReg()] != SU)
- CheckForLiveRegDef(I->getSUnit(), I->getReg(), LiveRegDefs.get(),
+ for (SDep &Pred : SU->Preds) {
+ if (Pred.isAssignedRegDep() && LiveRegDefs[Pred.getReg()] != SU)
+ CheckForLiveRegDef(Pred.getSUnit(), Pred.getReg(), LiveRegDefs.get(),
RegAdded, LRegs, TRI);
}
@@ -1743,8 +1739,7 @@ protected:
template<class SF>
static SUnit *popFromQueueImpl(std::vector<SUnit*> &Q, SF &Picker) {
std::vector<SUnit *>::iterator Best = Q.begin();
- for (std::vector<SUnit *>::iterator I = std::next(Q.begin()),
- E = Q.end(); I != E; ++I)
+ for (auto I = std::next(Q.begin()), E = Q.end(); I != E; ++I)
if (Picker(*Best, *I))
Best = I;
SUnit *V = *Best;
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 9d949a2bbfa6..d605a1dc1c20 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -2017,8 +2017,7 @@ void SelectionDAG::computeKnownBits(SDValue Op, KnownBits &Known,
if (SrcOp.getValueSizeInBits() != BitWidth) {
assert(SrcOp.getValueSizeInBits() > BitWidth &&
"Expected BUILD_VECTOR implicit truncation");
- Known2.One = Known2.One.trunc(BitWidth);
- Known2.Zero = Known2.Zero.trunc(BitWidth);
+ Known2 = Known2.trunc(BitWidth);
}
// Known bits are the values that are shared by every demanded element.
@@ -2045,8 +2044,7 @@ void SelectionDAG::computeKnownBits(SDValue Op, KnownBits &Known,
if (M < 0) {
// For UNDEF elements, we don't know anything about the common state of
// the shuffle result.
- Known.One.clearAllBits();
- Known.Zero.clearAllBits();
+ Known.resetAll();
DemandedLHS.clearAllBits();
DemandedRHS.clearAllBits();
break;
@@ -2219,14 +2217,13 @@ void SelectionDAG::computeKnownBits(SDValue Op, KnownBits &Known,
// Also compute a conservative estimate for high known-0 bits.
// More trickiness is possible, but this is sufficient for the
// interesting case of alignment computation.
- Known.One.clearAllBits();
unsigned TrailZ = Known.Zero.countTrailingOnes() +
Known2.Zero.countTrailingOnes();
unsigned LeadZ = std::max(Known.Zero.countLeadingOnes() +
Known2.Zero.countLeadingOnes(),
BitWidth) - BitWidth;
- Known.Zero.clearAllBits();
+ Known.resetAll();
Known.Zero.setLowBits(std::min(TrailZ, BitWidth));
Known.Zero.setHighBits(std::min(LeadZ, BitWidth));
break;
@@ -2377,7 +2374,10 @@ void SelectionDAG::computeKnownBits(SDValue Op, KnownBits &Known,
break;
}
case ISD::CTPOP: {
- Known.Zero.setBitsFrom(Log2_32(BitWidth)+1);
+ computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1);
+ // If we know some of the bits are zero, they can't be one.
+ unsigned PossibleOnes = BitWidth - Known2.Zero.countPopulation();
+ Known.Zero.setBitsFrom(Log2_32(PossibleOnes) + 1);
break;
}
case ISD::LOAD: {
@@ -2396,24 +2396,20 @@ void SelectionDAG::computeKnownBits(SDValue Op, KnownBits &Known,
case ISD::ZERO_EXTEND_VECTOR_INREG: {
EVT InVT = Op.getOperand(0).getValueType();
unsigned InBits = InVT.getScalarSizeInBits();
- Known.Zero = Known.Zero.trunc(InBits);
- Known.One = Known.One.trunc(InBits);
+ Known = Known.trunc(InBits);
computeKnownBits(Op.getOperand(0), Known,
DemandedElts.zext(InVT.getVectorNumElements()),
Depth + 1);
- Known.Zero = Known.Zero.zext(BitWidth);
- Known.One = Known.One.zext(BitWidth);
+ Known = Known.zext(BitWidth);
Known.Zero.setBitsFrom(InBits);
break;
}
case ISD::ZERO_EXTEND: {
EVT InVT = Op.getOperand(0).getValueType();
unsigned InBits = InVT.getScalarSizeInBits();
- Known.Zero = Known.Zero.trunc(InBits);
- Known.One = Known.One.trunc(InBits);
+ Known = Known.trunc(InBits);
computeKnownBits(Op.getOperand(0), Known, DemandedElts, Depth + 1);
- Known.Zero = Known.Zero.zext(BitWidth);
- Known.One = Known.One.zext(BitWidth);
+ Known = Known.zext(BitWidth);
Known.Zero.setBitsFrom(InBits);
break;
}
@@ -2422,34 +2418,28 @@ void SelectionDAG::computeKnownBits(SDValue Op, KnownBits &Known,
EVT InVT = Op.getOperand(0).getValueType();
unsigned InBits = InVT.getScalarSizeInBits();
- Known.Zero = Known.Zero.trunc(InBits);
- Known.One = Known.One.trunc(InBits);
+ Known = Known.trunc(InBits);
computeKnownBits(Op.getOperand(0), Known, DemandedElts, Depth + 1);
// If the sign bit is known to be zero or one, then sext will extend
// it to the top bits, else it will just zext.
- Known.Zero = Known.Zero.sext(BitWidth);
- Known.One = Known.One.sext(BitWidth);
+ Known = Known.sext(BitWidth);
break;
}
case ISD::ANY_EXTEND: {
EVT InVT = Op.getOperand(0).getValueType();
unsigned InBits = InVT.getScalarSizeInBits();
- Known.Zero = Known.Zero.trunc(InBits);
- Known.One = Known.One.trunc(InBits);
+ Known = Known.trunc(InBits);
computeKnownBits(Op.getOperand(0), Known, Depth+1);
- Known.Zero = Known.Zero.zext(BitWidth);
- Known.One = Known.One.zext(BitWidth);
+ Known = Known.zext(BitWidth);
break;
}
case ISD::TRUNCATE: {
EVT InVT = Op.getOperand(0).getValueType();
unsigned InBits = InVT.getScalarSizeInBits();
- Known.Zero = Known.Zero.zext(InBits);
- Known.One = Known.One.zext(InBits);
+ Known = Known.zext(InBits);
computeKnownBits(Op.getOperand(0), Known, DemandedElts, Depth + 1);
- Known.Zero = Known.Zero.trunc(BitWidth);
- Known.One = Known.One.trunc(BitWidth);
+ Known = Known.trunc(BitWidth);
break;
}
case ISD::AssertZext: {
@@ -2606,8 +2596,7 @@ void SelectionDAG::computeKnownBits(SDValue Op, KnownBits &Known,
uint32_t Leaders = std::max(Known.Zero.countLeadingOnes(),
Known2.Zero.countLeadingOnes());
- Known.One.clearAllBits();
- Known.Zero.clearAllBits();
+ Known.resetAll();
Known.Zero.setHighBits(Leaders);
break;
}
@@ -2621,8 +2610,7 @@ void SelectionDAG::computeKnownBits(SDValue Op, KnownBits &Known,
Known.One = Known.One.getHiBits(Known.One.getBitWidth() - Index * BitWidth);
// Remove high part of known bit mask
- Known.Zero = Known.Zero.trunc(BitWidth);
- Known.One = Known.One.trunc(BitWidth);
+ Known = Known.trunc(BitWidth);
break;
}
case ISD::EXTRACT_VECTOR_ELT: {
@@ -2634,10 +2622,8 @@ void SelectionDAG::computeKnownBits(SDValue Op, KnownBits &Known,
const unsigned NumSrcElts = VecVT.getVectorNumElements();
// If BitWidth > EltBitWidth the value is anyext:ed. So we do not know
// anything about the extended bits.
- if (BitWidth > EltBitWidth) {
- Known.Zero = Known.Zero.trunc(EltBitWidth);
- Known.One = Known.One.trunc(EltBitWidth);
- }
+ if (BitWidth > EltBitWidth)
+ Known = Known.trunc(EltBitWidth);
ConstantSDNode *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo);
if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts)) {
// If we know the element index, just demand that vector element.
@@ -2648,10 +2634,8 @@ void SelectionDAG::computeKnownBits(SDValue Op, KnownBits &Known,
// Unknown element index, so ignore DemandedElts and demand them all.
computeKnownBits(InVec, Known, Depth + 1);
}
- if (BitWidth > EltBitWidth) {
- Known.Zero = Known.Zero.zext(BitWidth);
- Known.One = Known.One.zext(BitWidth);
- }
+ if (BitWidth > EltBitWidth)
+ Known = Known.zext(BitWidth);
break;
}
case ISD::INSERT_VECTOR_ELT: {
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index ba9e11798f15..50313e2da884 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -4992,45 +4992,33 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
SDV = DAG.getConstantDbgValue(Variable, Expression, V, Offset, dl,
SDNodeOrder);
DAG.AddDbgValue(SDV, nullptr, false);
- } else {
- // Do not use getValue() in here; we don't want to generate code at
- // this point if it hasn't been done yet.
- SDValue N = NodeMap[V];
- if (!N.getNode() && isa<Argument>(V))
- // Check unused arguments map.
- N = UnusedArgNodeMap[V];
- if (N.getNode()) {
- if (!EmitFuncArgumentDbgValue(V, Variable, Expression, dl, Offset,
- false, N)) {
- SDV = getDbgValue(N, Variable, Expression, Offset, dl, SDNodeOrder);
- DAG.AddDbgValue(SDV, N.getNode(), false);
- }
- } else if (!V->use_empty() ) {
- // Do not call getValue(V) yet, as we don't want to generate code.
- // Remember it for later.
- DanglingDebugInfo DDI(&DI, dl, SDNodeOrder);
- DanglingDebugInfoMap[V] = DDI;
- } else {
- // We may expand this to cover more cases. One case where we have no
- // data available is an unreferenced parameter.
- DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
- }
+ return nullptr;
}
- // Build a debug info table entry.
- if (const BitCastInst *BCI = dyn_cast<BitCastInst>(V))
- V = BCI->getOperand(0);
- const AllocaInst *AI = dyn_cast<AllocaInst>(V);
- // Don't handle byval struct arguments or VLAs, for example.
- if (!AI) {
- DEBUG(dbgs() << "Dropping debug location info for:\n " << DI << "\n");
- DEBUG(dbgs() << " Last seen at:\n " << *V << "\n");
+ // Do not use getValue() in here; we don't want to generate code at
+ // this point if it hasn't been done yet.
+ SDValue N = NodeMap[V];
+ if (!N.getNode() && isa<Argument>(V)) // Check unused arguments map.
+ N = UnusedArgNodeMap[V];
+ if (N.getNode()) {
+ if (EmitFuncArgumentDbgValue(V, Variable, Expression, dl, Offset, false,
+ N))
+ return nullptr;
+ SDV = getDbgValue(N, Variable, Expression, Offset, dl, SDNodeOrder);
+ DAG.AddDbgValue(SDV, N.getNode(), false);
return nullptr;
}
- DenseMap<const AllocaInst*, int>::iterator SI =
- FuncInfo.StaticAllocaMap.find(AI);
- if (SI == FuncInfo.StaticAllocaMap.end())
- return nullptr; // VLAs.
+
+ if (!V->use_empty() ) {
+ // Do not call getValue(V) yet, as we don't want to generate code.
+ // Remember it for later.
+ DanglingDebugInfo DDI(&DI, dl, SDNodeOrder);
+ DanglingDebugInfoMap[V] = DDI;
+ return nullptr;
+ }
+
+ DEBUG(dbgs() << "Dropping debug location info for:\n " << DI << "\n");
+ DEBUG(dbgs() << " Last seen at:\n " << *V << "\n");
return nullptr;
}
@@ -5715,7 +5703,37 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
setValue(&I, N);
return nullptr;
}
+ case Intrinsic::xray_customevent: {
+ // Here we want to make sure that the intrinsic behaves as if it has a
+ // specific calling convention, and only for x86_64.
+ // FIXME: Support other platforms later.
+ const auto &Triple = DAG.getTarget().getTargetTriple();
+ if (Triple.getArch() != Triple::x86_64 || !Triple.isOSLinux())
+ return nullptr;
+ SDLoc DL = getCurSDLoc();
+ SmallVector<SDValue, 8> Ops;
+
+ // We want to say that we always want the arguments in registers.
+ SDValue LogEntryVal = getValue(I.getArgOperand(0));
+ SDValue StrSizeVal = getValue(I.getArgOperand(1));
+ SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
+ SDValue Chain = getRoot();
+ Ops.push_back(LogEntryVal);
+ Ops.push_back(StrSizeVal);
+ Ops.push_back(Chain);
+
+ // We need to enforce the calling convention for the callsite, so that
+ // argument ordering is enforced correctly, and that register allocation can
+ // see that some registers may be assumed clobbered and have to preserve
+ // them across calls to the intrinsic.
+ MachineSDNode *MN = DAG.getMachineNode(TargetOpcode::PATCHABLE_EVENT_CALL,
+ DL, NodeTys, Ops);
+ SDValue patchableNode = SDValue(MN, 0);
+ DAG.setRoot(patchableNode);
+ setValue(&I, patchableNode);
+ return nullptr;
+ }
case Intrinsic::experimental_deoptimize:
LowerDeoptimizeCall(&I);
return nullptr;
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index 2d39ecd9779b..23f597db140c 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -561,8 +561,7 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
if (Known2.One.getBitWidth() != BitWidth) {
assert(Known2.getBitWidth() > BitWidth &&
"Expected BUILD_VECTOR implicit truncation");
- Known2.One = Known2.One.trunc(BitWidth);
- Known2.Zero = Known2.Zero.trunc(BitWidth);
+ Known2 = Known2.trunc(BitWidth);
}
// Known bits are the values that are shared by every element.
@@ -659,7 +658,7 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
// Output known-1 are known to be set if set in either the LHS | RHS.
Known.One |= Known2.One;
break;
- case ISD::XOR:
+ case ISD::XOR: {
if (SimplifyDemandedBits(Op.getOperand(1), NewMask, Known, TLO, Depth+1))
return true;
assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
@@ -704,28 +703,24 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
}
}
- // If the RHS is a constant, see if we can simplify it.
- // for XOR, we prefer to force bits to 1 if they will make a -1.
- // If we can't force bits, try to shrink the constant.
- if (ConstantSDNode *C = isConstOrConstSplat(Op.getOperand(1))) {
- APInt Expanded = C->getAPIntValue() | (~NewMask);
- // If we can expand it to have all bits set, do it.
- if (Expanded.isAllOnesValue()) {
- if (Expanded != C->getAPIntValue()) {
- EVT VT = Op.getValueType();
- SDValue New = TLO.DAG.getNode(Op.getOpcode(), dl,VT, Op.getOperand(0),
- TLO.DAG.getConstant(Expanded, dl, VT));
- return TLO.CombineTo(Op, New);
- }
- // If it already has all the bits set, nothing to change
- // but don't shrink either!
- } else if (ShrinkDemandedConstant(Op, NewMask, TLO)) {
- return true;
+ // If the RHS is a constant, see if we can change it. Don't alter a -1
+ // constant because that's a 'not' op, and that is better for combining and
+ // codegen.
+ ConstantSDNode *C = isConstOrConstSplat(Op.getOperand(1));
+ if (C && !C->isAllOnesValue()) {
+ if (NewMask.isSubsetOf(C->getAPIntValue())) {
+ // We're flipping all demanded bits. Flip the undemanded bits too.
+ SDValue New = TLO.DAG.getNOT(dl, Op.getOperand(0), Op.getValueType());
+ return TLO.CombineTo(Op, New);
}
+ // If we can't turn this into a 'not', try to shrink the constant.
+ if (ShrinkDemandedConstant(Op, NewMask, TLO))
+ return true;
}
Known = std::move(KnownOut);
break;
+ }
case ISD::SELECT:
if (SimplifyDemandedBits(Op.getOperand(2), NewMask, Known, TLO, Depth+1))
return true;
@@ -1091,8 +1086,7 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
if (SimplifyDemandedBits(Op.getOperand(0), InMask, Known, TLO, Depth+1))
return true;
assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
- Known.Zero = Known.Zero.zext(BitWidth);
- Known.One = Known.One.zext(BitWidth);
+ Known = Known.zext(BitWidth);
Known.Zero |= NewBits;
break;
}
@@ -1118,8 +1112,7 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
if (SimplifyDemandedBits(Op.getOperand(0), InDemandedBits, Known, TLO,
Depth+1))
return true;
- Known.Zero = Known.Zero.zext(BitWidth);
- Known.One = Known.One.zext(BitWidth);
+ Known = Known.zext(BitWidth);
// If the sign bit is known zero, convert this to a zero extend.
if (Known.Zero.intersects(InSignBit))
@@ -1143,8 +1136,7 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
if (SimplifyDemandedBits(Op.getOperand(0), InMask, Known, TLO, Depth+1))
return true;
assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
- Known.Zero = Known.Zero.zext(BitWidth);
- Known.One = Known.One.zext(BitWidth);
+ Known = Known.zext(BitWidth);
break;
}
case ISD::TRUNCATE: {
@@ -1154,8 +1146,7 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
APInt TruncMask = NewMask.zext(OperandBitWidth);
if (SimplifyDemandedBits(Op.getOperand(0), TruncMask, Known, TLO, Depth+1))
return true;
- Known.Zero = Known.Zero.trunc(BitWidth);
- Known.One = Known.One.trunc(BitWidth);
+ Known = Known.trunc(BitWidth);
// If the input is only used by this truncate, see if we can shrink it based
// on the known demanded bits.
@@ -1312,7 +1303,7 @@ void TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
Op.getOpcode() == ISD::INTRINSIC_VOID) &&
"Should use MaskedValueIsZero if you don't know whether Op"
" is a target node!");
- Known.Zero.clearAllBits(); Known.One.clearAllBits();
+ Known.resetAll();
}
/// This method can be implemented by targets that want to expose additional
diff --git a/contrib/llvm/lib/CodeGen/XRayInstrumentation.cpp b/contrib/llvm/lib/CodeGen/XRayInstrumentation.cpp
index 7d2848bdc13b..2df3602733f3 100644
--- a/contrib/llvm/lib/CodeGen/XRayInstrumentation.cpp
+++ b/contrib/llvm/lib/CodeGen/XRayInstrumentation.cpp
@@ -18,6 +18,8 @@
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
+#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Target/TargetInstrInfo.h"
@@ -33,6 +35,14 @@ struct XRayInstrumentation : public MachineFunctionPass {
initializeXRayInstrumentationPass(*PassRegistry::getPassRegistry());
}
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesCFG();
+ AU.addRequired<MachineLoopInfo>();
+ AU.addPreserved<MachineLoopInfo>();
+ AU.addPreserved<MachineDominatorTree>();
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+
bool runOnMachineFunction(MachineFunction &MF) override;
private:
@@ -43,7 +53,7 @@ private:
// This is the approach to go on CPUs which have a single RET instruction,
// like x86/x86_64.
void replaceRetWithPatchableRet(MachineFunction &MF,
- const TargetInstrInfo *TII);
+ const TargetInstrInfo *TII);
// Prepend the original return instruction with the exit sled code ("patchable
// function exit" pseudo-instruction), preserving the original return
@@ -54,13 +64,12 @@ private:
// have to call the trampoline and return from it to the original return
// instruction of the function being instrumented.
void prependRetWithPatchableExit(MachineFunction &MF,
- const TargetInstrInfo *TII);
+ const TargetInstrInfo *TII);
};
} // anonymous namespace
-void XRayInstrumentation::replaceRetWithPatchableRet(MachineFunction &MF,
- const TargetInstrInfo *TII)
-{
+void XRayInstrumentation::replaceRetWithPatchableRet(
+ MachineFunction &MF, const TargetInstrInfo *TII) {
// We look for *all* terminators and returns, then replace those with
// PATCHABLE_RET instructions.
SmallVector<MachineInstr *, 4> Terminators;
@@ -91,9 +100,8 @@ void XRayInstrumentation::replaceRetWithPatchableRet(MachineFunction &MF,
I->eraseFromParent();
}
-void XRayInstrumentation::prependRetWithPatchableExit(MachineFunction &MF,
- const TargetInstrInfo *TII)
-{
+void XRayInstrumentation::prependRetWithPatchableExit(
+ MachineFunction &MF, const TargetInstrInfo *TII) {
for (auto &MBB : MF) {
for (auto &T : MBB.terminators()) {
unsigned Opc = 0;
@@ -106,7 +114,7 @@ void XRayInstrumentation::prependRetWithPatchableExit(MachineFunction &MF,
if (Opc != 0) {
// Prepend the return instruction with PATCHABLE_FUNCTION_EXIT or
// PATCHABLE_TAIL_CALL .
- BuildMI(MBB, T, T.getDebugLoc(),TII->get(Opc));
+ BuildMI(MBB, T, T.getDebugLoc(), TII->get(Opc));
}
}
}
@@ -125,8 +133,13 @@ bool XRayInstrumentation::runOnMachineFunction(MachineFunction &MF) {
return false; // XRay threshold attribute not found.
if (Attr.getValueAsString().getAsInteger(10, XRayThreshold))
return false; // Invalid value for threshold.
- if (F.size() < XRayThreshold)
- return false; // Function is too small.
+
+ // Check if we have a loop.
+ // FIXME: Maybe make this smarter, and see whether the loops are dependent
+ // on inputs or side-effects?
+ MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
+ if (MLI.empty() && F.size() < XRayThreshold)
+ return false; // Function is too small and has no loops.
}
// We look for the first non-empty MachineBasicBlock, so that we can insert
@@ -142,12 +155,10 @@ bool XRayInstrumentation::runOnMachineFunction(MachineFunction &MF) {
if (!MF.getSubtarget().isXRaySupported()) {
FirstMI.emitError("An attempt to perform XRay instrumentation for an"
- " unsupported target.");
+ " unsupported target.");
return false;
}
- // FIXME: Do the loop triviality analysis here or in an earlier pass.
-
// First, insert an PATCHABLE_FUNCTION_ENTER as the first instruction of the
// MachineFunction.
BuildMI(FirstMBB, FirstMI, FirstMI.getDebugLoc(),
@@ -176,5 +187,8 @@ bool XRayInstrumentation::runOnMachineFunction(MachineFunction &MF) {
char XRayInstrumentation::ID = 0;
char &llvm::XRayInstrumentationID = XRayInstrumentation::ID;
-INITIALIZE_PASS(XRayInstrumentation, "xray-instrumentation", "Insert XRay ops",
- false, false)
+INITIALIZE_PASS_BEGIN(XRayInstrumentation, "xray-instrumentation",
+ "Insert XRay ops", false, false)
+INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
+INITIALIZE_PASS_END(XRayInstrumentation, "xray-instrumentation",
+ "Insert XRay ops", false, false)
diff --git a/contrib/llvm/lib/DebugInfo/CodeView/TypeDatabase.cpp b/contrib/llvm/lib/DebugInfo/CodeView/TypeDatabase.cpp
index efaba4646ffe..5b8841041f88 100644
--- a/contrib/llvm/lib/DebugInfo/CodeView/TypeDatabase.cpp
+++ b/contrib/llvm/lib/DebugInfo/CodeView/TypeDatabase.cpp
@@ -65,6 +65,11 @@ static const SimpleTypeEntry SimpleTypeNames[] = {
{"__bool64*", SimpleTypeKind::Boolean64},
};
+TypeDatabase::TypeDatabase(uint32_t ExpectedSize) : TypeNameStorage(Allocator) {
+ CVUDTNames.reserve(ExpectedSize);
+ TypeRecords.reserve(ExpectedSize);
+}
+
/// Gets the type index for the next type record.
TypeIndex TypeDatabase::getNextTypeIndex() const {
return TypeIndex(TypeIndex::FirstNonSimpleIndex + CVUDTNames.size());
diff --git a/contrib/llvm/lib/DebugInfo/DWARF/DWARFContext.cpp b/contrib/llvm/lib/DebugInfo/DWARF/DWARFContext.cpp
index 573d37d77fee..246899ac12b9 100644
--- a/contrib/llvm/lib/DebugInfo/DWARF/DWARFContext.cpp
+++ b/contrib/llvm/lib/DebugInfo/DWARF/DWARFContext.cpp
@@ -692,6 +692,10 @@ DWARFContext::getLineTableForUnit(DWARFUnit *U) {
if (const DWARFLineTable *lt = Line->getLineTable(stmtOffset))
return lt;
+ // Make sure the offset is good before we try to parse.
+ if (stmtOffset >= U->getLineSection().size())
+ return nullptr;
+
// We have to parse it first.
DataExtractor lineData(U->getLineSection(), isLittleEndian(),
U->getAddressByteSize());
@@ -953,6 +957,26 @@ static bool isRelocScattered(const object::ObjectFile &Obj,
return MachObj->isRelocationScattered(RelocInfo);
}
+Error DWARFContextInMemory::maybeDecompress(const SectionRef &Sec,
+ StringRef Name, StringRef &Data) {
+ if (!Decompressor::isCompressed(Sec))
+ return Error::success();
+
+ Expected<Decompressor> Decompressor =
+ Decompressor::create(Name, Data, IsLittleEndian, AddressSize == 8);
+ if (!Decompressor)
+ return Decompressor.takeError();
+
+ SmallString<32> Out;
+ if (auto Err = Decompressor->decompress(Out))
+ return Err;
+
+ UncompressedSections.emplace_back(std::move(Out));
+ Data = UncompressedSections.back();
+
+ return Error::success();
+}
+
DWARFContextInMemory::DWARFContextInMemory(const object::ObjectFile &Obj,
const LoadedObjectInfo *L)
: IsLittleEndian(Obj.isLittleEndian()),
@@ -976,16 +1000,11 @@ DWARFContextInMemory::DWARFContextInMemory(const object::ObjectFile &Obj,
if (!L || !L->getLoadedSectionContents(*RelocatedSection,data))
Section.getContents(data);
- if (Decompressor::isCompressed(Section)) {
- Expected<Decompressor> Decompressor =
- Decompressor::create(name, data, IsLittleEndian, AddressSize == 8);
- if (!Decompressor)
- continue;
- SmallString<32> Out;
- if (auto Err = Decompressor->decompress(Out))
- continue;
- UncompressedSections.emplace_back(std::move(Out));
- data = UncompressedSections.back();
+ if (auto Err = maybeDecompress(Section, name, data)) {
+ errs() << "error: failed to decompress '" + name + "', " +
+ toString(std::move(Err))
+ << '\n';
+ continue;
}
// Compressed sections names in GNU style starts from ".z",
diff --git a/contrib/llvm/lib/DebugInfo/DWARF/DWARFFormValue.cpp b/contrib/llvm/lib/DebugInfo/DWARF/DWARFFormValue.cpp
index 7f827de89240..1cbd3ea2c869 100644
--- a/contrib/llvm/lib/DebugInfo/DWARF/DWARFFormValue.cpp
+++ b/contrib/llvm/lib/DebugInfo/DWARF/DWARFFormValue.cpp
@@ -7,13 +7,13 @@
//
//===----------------------------------------------------------------------===//
+#include "llvm/DebugInfo/DWARF/DWARFFormValue.h"
#include "SyntaxHighlighting.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/DebugInfo/DWARF/DWARFContext.h"
-#include "llvm/DebugInfo/DWARF/DWARFFormValue.h"
#include "llvm/DebugInfo/DWARF/DWARFRelocMap.h"
#include "llvm/DebugInfo/DWARF/DWARFUnit.h"
#include "llvm/Support/Dwarf.h"
@@ -29,34 +29,34 @@ using namespace dwarf;
using namespace syntax;
static const DWARFFormValue::FormClass DWARF4FormClasses[] = {
- DWARFFormValue::FC_Unknown, // 0x0
- DWARFFormValue::FC_Address, // 0x01 DW_FORM_addr
- DWARFFormValue::FC_Unknown, // 0x02 unused
- DWARFFormValue::FC_Block, // 0x03 DW_FORM_block2
- DWARFFormValue::FC_Block, // 0x04 DW_FORM_block4
- DWARFFormValue::FC_Constant, // 0x05 DW_FORM_data2
- // --- These can be FC_SectionOffset in DWARF3 and below:
- DWARFFormValue::FC_Constant, // 0x06 DW_FORM_data4
- DWARFFormValue::FC_Constant, // 0x07 DW_FORM_data8
- // ---
- DWARFFormValue::FC_String, // 0x08 DW_FORM_string
- DWARFFormValue::FC_Block, // 0x09 DW_FORM_block
- DWARFFormValue::FC_Block, // 0x0a DW_FORM_block1
- DWARFFormValue::FC_Constant, // 0x0b DW_FORM_data1
- DWARFFormValue::FC_Flag, // 0x0c DW_FORM_flag
- DWARFFormValue::FC_Constant, // 0x0d DW_FORM_sdata
- DWARFFormValue::FC_String, // 0x0e DW_FORM_strp
- DWARFFormValue::FC_Constant, // 0x0f DW_FORM_udata
- DWARFFormValue::FC_Reference, // 0x10 DW_FORM_ref_addr
- DWARFFormValue::FC_Reference, // 0x11 DW_FORM_ref1
- DWARFFormValue::FC_Reference, // 0x12 DW_FORM_ref2
- DWARFFormValue::FC_Reference, // 0x13 DW_FORM_ref4
- DWARFFormValue::FC_Reference, // 0x14 DW_FORM_ref8
- DWARFFormValue::FC_Reference, // 0x15 DW_FORM_ref_udata
- DWARFFormValue::FC_Indirect, // 0x16 DW_FORM_indirect
- DWARFFormValue::FC_SectionOffset, // 0x17 DW_FORM_sec_offset
- DWARFFormValue::FC_Exprloc, // 0x18 DW_FORM_exprloc
- DWARFFormValue::FC_Flag, // 0x19 DW_FORM_flag_present
+ DWARFFormValue::FC_Unknown, // 0x0
+ DWARFFormValue::FC_Address, // 0x01 DW_FORM_addr
+ DWARFFormValue::FC_Unknown, // 0x02 unused
+ DWARFFormValue::FC_Block, // 0x03 DW_FORM_block2
+ DWARFFormValue::FC_Block, // 0x04 DW_FORM_block4
+ DWARFFormValue::FC_Constant, // 0x05 DW_FORM_data2
+ // --- These can be FC_SectionOffset in DWARF3 and below:
+ DWARFFormValue::FC_Constant, // 0x06 DW_FORM_data4
+ DWARFFormValue::FC_Constant, // 0x07 DW_FORM_data8
+ // ---
+ DWARFFormValue::FC_String, // 0x08 DW_FORM_string
+ DWARFFormValue::FC_Block, // 0x09 DW_FORM_block
+ DWARFFormValue::FC_Block, // 0x0a DW_FORM_block1
+ DWARFFormValue::FC_Constant, // 0x0b DW_FORM_data1
+ DWARFFormValue::FC_Flag, // 0x0c DW_FORM_flag
+ DWARFFormValue::FC_Constant, // 0x0d DW_FORM_sdata
+ DWARFFormValue::FC_String, // 0x0e DW_FORM_strp
+ DWARFFormValue::FC_Constant, // 0x0f DW_FORM_udata
+ DWARFFormValue::FC_Reference, // 0x10 DW_FORM_ref_addr
+ DWARFFormValue::FC_Reference, // 0x11 DW_FORM_ref1
+ DWARFFormValue::FC_Reference, // 0x12 DW_FORM_ref2
+ DWARFFormValue::FC_Reference, // 0x13 DW_FORM_ref4
+ DWARFFormValue::FC_Reference, // 0x14 DW_FORM_ref8
+ DWARFFormValue::FC_Reference, // 0x15 DW_FORM_ref_udata
+ DWARFFormValue::FC_Indirect, // 0x16 DW_FORM_indirect
+ DWARFFormValue::FC_SectionOffset, // 0x17 DW_FORM_sec_offset
+ DWARFFormValue::FC_Exprloc, // 0x18 DW_FORM_exprloc
+ DWARFFormValue::FC_Flag, // 0x19 DW_FORM_flag_present
};
namespace {
@@ -83,10 +83,10 @@ public:
uint8_t getDwarfOffsetByteSize() const {
switch (Format) {
- case dwarf::DwarfFormat::DWARF32:
- return 4;
- case dwarf::DwarfFormat::DWARF64:
- return 8;
+ case dwarf::DwarfFormat::DWARF32:
+ return 4;
+ case dwarf::DwarfFormat::DWARF64:
+ return 8;
}
llvm_unreachable("Invalid Format value");
}
@@ -97,83 +97,83 @@ public:
template <class T>
static Optional<uint8_t> getFixedByteSize(dwarf::Form Form, const T *U) {
switch (Form) {
- case DW_FORM_addr:
- if (U)
- return U->getAddressByteSize();
- return None;
+ case DW_FORM_addr:
+ if (U)
+ return U->getAddressByteSize();
+ return None;
- case DW_FORM_block: // ULEB128 length L followed by L bytes.
- case DW_FORM_block1: // 1 byte length L followed by L bytes.
- case DW_FORM_block2: // 2 byte length L followed by L bytes.
- case DW_FORM_block4: // 4 byte length L followed by L bytes.
- case DW_FORM_string: // C-string with null terminator.
- case DW_FORM_sdata: // SLEB128.
- case DW_FORM_udata: // ULEB128.
- case DW_FORM_ref_udata: // ULEB128.
- case DW_FORM_indirect: // ULEB128.
- case DW_FORM_exprloc: // ULEB128 length L followed by L bytes.
- case DW_FORM_strx: // ULEB128.
- case DW_FORM_addrx: // ULEB128.
- case DW_FORM_loclistx: // ULEB128.
- case DW_FORM_rnglistx: // ULEB128.
- case DW_FORM_GNU_addr_index: // ULEB128.
- case DW_FORM_GNU_str_index: // ULEB128.
- return None;
+ case DW_FORM_block: // ULEB128 length L followed by L bytes.
+ case DW_FORM_block1: // 1 byte length L followed by L bytes.
+ case DW_FORM_block2: // 2 byte length L followed by L bytes.
+ case DW_FORM_block4: // 4 byte length L followed by L bytes.
+ case DW_FORM_string: // C-string with null terminator.
+ case DW_FORM_sdata: // SLEB128.
+ case DW_FORM_udata: // ULEB128.
+ case DW_FORM_ref_udata: // ULEB128.
+ case DW_FORM_indirect: // ULEB128.
+ case DW_FORM_exprloc: // ULEB128 length L followed by L bytes.
+ case DW_FORM_strx: // ULEB128.
+ case DW_FORM_addrx: // ULEB128.
+ case DW_FORM_loclistx: // ULEB128.
+ case DW_FORM_rnglistx: // ULEB128.
+ case DW_FORM_GNU_addr_index: // ULEB128.
+ case DW_FORM_GNU_str_index: // ULEB128.
+ return None;
- case DW_FORM_ref_addr:
- if (U)
- return U->getRefAddrByteSize();
- return None;
+ case DW_FORM_ref_addr:
+ if (U)
+ return U->getRefAddrByteSize();
+ return None;
- case DW_FORM_flag:
- case DW_FORM_data1:
- case DW_FORM_ref1:
- case DW_FORM_strx1:
- case DW_FORM_addrx1:
- return 1;
+ case DW_FORM_flag:
+ case DW_FORM_data1:
+ case DW_FORM_ref1:
+ case DW_FORM_strx1:
+ case DW_FORM_addrx1:
+ return 1;
- case DW_FORM_data2:
- case DW_FORM_ref2:
- case DW_FORM_strx2:
- case DW_FORM_addrx2:
- return 2;
+ case DW_FORM_data2:
+ case DW_FORM_ref2:
+ case DW_FORM_strx2:
+ case DW_FORM_addrx2:
+ return 2;
- case DW_FORM_data4:
- case DW_FORM_ref4:
- case DW_FORM_ref_sup4:
- case DW_FORM_strx4:
- case DW_FORM_addrx4:
- return 4;
+ case DW_FORM_data4:
+ case DW_FORM_ref4:
+ case DW_FORM_ref_sup4:
+ case DW_FORM_strx4:
+ case DW_FORM_addrx4:
+ return 4;
- case DW_FORM_strp:
- case DW_FORM_GNU_ref_alt:
- case DW_FORM_GNU_strp_alt:
- case DW_FORM_line_strp:
- case DW_FORM_sec_offset:
- case DW_FORM_strp_sup:
- if (U)
- return U->getDwarfOffsetByteSize();
- return None;
+ case DW_FORM_strp:
+ case DW_FORM_GNU_ref_alt:
+ case DW_FORM_GNU_strp_alt:
+ case DW_FORM_line_strp:
+ case DW_FORM_sec_offset:
+ case DW_FORM_strp_sup:
+ if (U)
+ return U->getDwarfOffsetByteSize();
+ return None;
- case DW_FORM_data8:
- case DW_FORM_ref8:
- case DW_FORM_ref_sig8:
- case DW_FORM_ref_sup8:
- return 8;
+ case DW_FORM_data8:
+ case DW_FORM_ref8:
+ case DW_FORM_ref_sig8:
+ case DW_FORM_ref_sup8:
+ return 8;
- case DW_FORM_flag_present:
- return 0;
+ case DW_FORM_flag_present:
+ return 0;
- case DW_FORM_data16:
- return 16;
+ case DW_FORM_data16:
+ return 16;
- case DW_FORM_implicit_const:
- // The implicit value is stored in the abbreviation as a SLEB128, and
- // there no data in debug info.
- return 0;
+ case DW_FORM_implicit_const:
+ // The implicit value is stored in the abbreviation as a SLEB128, and
+ // there no data in debug info.
+ return 0;
- default:
- llvm_unreachable("Handle this form in this switch statement");
+ default:
+ llvm_unreachable("Handle this form in this switch statement");
}
return None;
}
@@ -184,91 +184,91 @@ static bool skipFormValue(dwarf::Form Form, const DataExtractor &DebugInfoData,
bool Indirect = false;
do {
switch (Form) {
- // Blocks of inlined data that have a length field and the data bytes
- // inlined in the .debug_info.
- case DW_FORM_exprloc:
- case DW_FORM_block: {
- uint64_t size = DebugInfoData.getULEB128(OffsetPtr);
- *OffsetPtr += size;
- return true;
- }
- case DW_FORM_block1: {
- uint8_t size = DebugInfoData.getU8(OffsetPtr);
- *OffsetPtr += size;
- return true;
- }
- case DW_FORM_block2: {
- uint16_t size = DebugInfoData.getU16(OffsetPtr);
- *OffsetPtr += size;
- return true;
- }
- case DW_FORM_block4: {
- uint32_t size = DebugInfoData.getU32(OffsetPtr);
- *OffsetPtr += size;
+ // Blocks of inlined data that have a length field and the data bytes
+ // inlined in the .debug_info.
+ case DW_FORM_exprloc:
+ case DW_FORM_block: {
+ uint64_t size = DebugInfoData.getULEB128(OffsetPtr);
+ *OffsetPtr += size;
+ return true;
+ }
+ case DW_FORM_block1: {
+ uint8_t size = DebugInfoData.getU8(OffsetPtr);
+ *OffsetPtr += size;
+ return true;
+ }
+ case DW_FORM_block2: {
+ uint16_t size = DebugInfoData.getU16(OffsetPtr);
+ *OffsetPtr += size;
+ return true;
+ }
+ case DW_FORM_block4: {
+ uint32_t size = DebugInfoData.getU32(OffsetPtr);
+ *OffsetPtr += size;
+ return true;
+ }
+
+ // Inlined NULL terminated C-strings.
+ case DW_FORM_string:
+ DebugInfoData.getCStr(OffsetPtr);
+ return true;
+
+ case DW_FORM_addr:
+ case DW_FORM_ref_addr:
+ case DW_FORM_flag_present:
+ case DW_FORM_data1:
+ case DW_FORM_data2:
+ case DW_FORM_data4:
+ case DW_FORM_data8:
+ case DW_FORM_flag:
+ case DW_FORM_ref1:
+ case DW_FORM_ref2:
+ case DW_FORM_ref4:
+ case DW_FORM_ref8:
+ case DW_FORM_ref_sig8:
+ case DW_FORM_ref_sup4:
+ case DW_FORM_ref_sup8:
+ case DW_FORM_strx1:
+ case DW_FORM_strx2:
+ case DW_FORM_strx4:
+ case DW_FORM_addrx1:
+ case DW_FORM_addrx2:
+ case DW_FORM_addrx4:
+ case DW_FORM_sec_offset:
+ case DW_FORM_strp:
+ case DW_FORM_strp_sup:
+ case DW_FORM_line_strp:
+ case DW_FORM_GNU_ref_alt:
+ case DW_FORM_GNU_strp_alt:
+ if (Optional<uint8_t> FixedSize = ::getFixedByteSize(Form, U)) {
+ *OffsetPtr += *FixedSize;
return true;
}
+ return false;
- // Inlined NULL terminated C-strings.
- case DW_FORM_string:
- DebugInfoData.getCStr(OffsetPtr);
- return true;
+ // signed or unsigned LEB 128 values.
+ case DW_FORM_sdata:
+ DebugInfoData.getSLEB128(OffsetPtr);
+ return true;
- case DW_FORM_addr:
- case DW_FORM_ref_addr:
- case DW_FORM_flag_present:
- case DW_FORM_data1:
- case DW_FORM_data2:
- case DW_FORM_data4:
- case DW_FORM_data8:
- case DW_FORM_flag:
- case DW_FORM_ref1:
- case DW_FORM_ref2:
- case DW_FORM_ref4:
- case DW_FORM_ref8:
- case DW_FORM_ref_sig8:
- case DW_FORM_ref_sup4:
- case DW_FORM_ref_sup8:
- case DW_FORM_strx1:
- case DW_FORM_strx2:
- case DW_FORM_strx4:
- case DW_FORM_addrx1:
- case DW_FORM_addrx2:
- case DW_FORM_addrx4:
- case DW_FORM_sec_offset:
- case DW_FORM_strp:
- case DW_FORM_strp_sup:
- case DW_FORM_line_strp:
- case DW_FORM_GNU_ref_alt:
- case DW_FORM_GNU_strp_alt:
- if (Optional<uint8_t> FixedSize = ::getFixedByteSize(Form, U)) {
- *OffsetPtr += *FixedSize;
- return true;
- }
- return false;
+ case DW_FORM_udata:
+ case DW_FORM_ref_udata:
+ case DW_FORM_strx:
+ case DW_FORM_addrx:
+ case DW_FORM_loclistx:
+ case DW_FORM_rnglistx:
+ case DW_FORM_GNU_addr_index:
+ case DW_FORM_GNU_str_index:
+ DebugInfoData.getULEB128(OffsetPtr);
+ return true;
- // signed or unsigned LEB 128 values.
- case DW_FORM_sdata:
- DebugInfoData.getSLEB128(OffsetPtr);
- return true;
+ case DW_FORM_indirect:
+ Indirect = true;
+ Form = static_cast<dwarf::Form>(DebugInfoData.getULEB128(OffsetPtr));
+ break;
- case DW_FORM_udata:
- case DW_FORM_ref_udata:
- case DW_FORM_strx:
- case DW_FORM_addrx:
- case DW_FORM_loclistx:
- case DW_FORM_rnglistx:
- case DW_FORM_GNU_addr_index:
- case DW_FORM_GNU_str_index:
- DebugInfoData.getULEB128(OffsetPtr);
- return true;
-
- case DW_FORM_indirect:
- Indirect = true;
- Form = static_cast<dwarf::Form>(DebugInfoData.getULEB128(OffsetPtr));
- break;
-
- default:
- return false;
+ default:
+ return false;
}
} while (Indirect);
return true;
@@ -316,87 +316,84 @@ bool DWARFFormValue::isFormClass(DWARFFormValue::FormClass FC) const {
FC == FC_SectionOffset;
}
-bool DWARFFormValue::extractValue(const DataExtractor &data,
- uint32_t *offset_ptr,
- const DWARFUnit *cu) {
- U = cu;
- bool indirect = false;
- bool is_block = false;
+bool DWARFFormValue::extractValue(const DataExtractor &Data,
+ uint32_t *OffsetPtr, const DWARFUnit *CU) {
+ U = CU;
+ bool Indirect = false;
+ bool IsBlock = false;
Value.data = nullptr;
// Read the value for the form into value and follow and DW_FORM_indirect
// instances we run into
do {
- indirect = false;
+ Indirect = false;
switch (Form) {
case DW_FORM_addr:
case DW_FORM_ref_addr: {
if (!U)
return false;
- uint16_t AddrSize =
- (Form == DW_FORM_addr)
- ? U->getAddressByteSize()
- : U->getRefAddrByteSize();
+ uint16_t AddrSize = (Form == DW_FORM_addr) ? U->getAddressByteSize()
+ : U->getRefAddrByteSize();
Value.uval =
- getRelocatedValue(data, AddrSize, offset_ptr, U->getRelocMap());
+ getRelocatedValue(Data, AddrSize, OffsetPtr, U->getRelocMap());
break;
}
case DW_FORM_exprloc:
case DW_FORM_block:
- Value.uval = data.getULEB128(offset_ptr);
- is_block = true;
+ Value.uval = Data.getULEB128(OffsetPtr);
+ IsBlock = true;
break;
case DW_FORM_block1:
- Value.uval = data.getU8(offset_ptr);
- is_block = true;
+ Value.uval = Data.getU8(OffsetPtr);
+ IsBlock = true;
break;
case DW_FORM_block2:
- Value.uval = data.getU16(offset_ptr);
- is_block = true;
+ Value.uval = Data.getU16(OffsetPtr);
+ IsBlock = true;
break;
case DW_FORM_block4:
- Value.uval = data.getU32(offset_ptr);
- is_block = true;
+ Value.uval = Data.getU32(OffsetPtr);
+ IsBlock = true;
break;
case DW_FORM_data1:
case DW_FORM_ref1:
case DW_FORM_flag:
case DW_FORM_strx1:
case DW_FORM_addrx1:
- Value.uval = data.getU8(offset_ptr);
+ Value.uval = Data.getU8(OffsetPtr);
break;
case DW_FORM_data2:
case DW_FORM_ref2:
case DW_FORM_strx2:
case DW_FORM_addrx2:
- Value.uval = data.getU16(offset_ptr);
+ Value.uval = Data.getU16(OffsetPtr);
break;
case DW_FORM_data4:
case DW_FORM_ref4:
case DW_FORM_ref_sup4:
case DW_FORM_strx4:
case DW_FORM_addrx4: {
- const RelocAddrMap* RelocMap = U ? U->getRelocMap() : nullptr;
- Value.uval = getRelocatedValue(data, 4, offset_ptr, RelocMap);
+ const RelocAddrMap *RelocMap = U ? U->getRelocMap() : nullptr;
+ Value.uval = getRelocatedValue(Data, 4, OffsetPtr, RelocMap);
break;
}
case DW_FORM_data8:
case DW_FORM_ref8:
case DW_FORM_ref_sup8:
- Value.uval = data.getU64(offset_ptr);
+ Value.uval = Data.getU64(OffsetPtr);
break;
case DW_FORM_sdata:
- Value.sval = data.getSLEB128(offset_ptr);
+ Value.sval = Data.getSLEB128(OffsetPtr);
break;
case DW_FORM_udata:
case DW_FORM_ref_udata:
- Value.uval = data.getULEB128(offset_ptr);
+ Value.uval = Data.getULEB128(OffsetPtr);
break;
case DW_FORM_string:
- Value.cstr = data.getCStr(offset_ptr);
+ Value.cstr = Data.getCStr(OffsetPtr);
break;
case DW_FORM_indirect:
- Form = static_cast<dwarf::Form>(data.getULEB128(offset_ptr));
- indirect = true;
+ Form = static_cast<dwarf::Form>(Data.getULEB128(OffsetPtr));
+ Indirect = true;
break;
case DW_FORM_strp:
case DW_FORM_sec_offset:
@@ -406,82 +403,93 @@ bool DWARFFormValue::extractValue(const DataExtractor &data,
case DW_FORM_strp_sup: {
if (!U)
return false;
- Value.uval = getRelocatedValue(data, U->getDwarfOffsetByteSize(),
- offset_ptr, U->getRelocMap());
+ Value.uval = getRelocatedValue(Data, U->getDwarfOffsetByteSize(),
+ OffsetPtr, U->getRelocMap());
break;
}
case DW_FORM_flag_present:
Value.uval = 1;
break;
case DW_FORM_ref_sig8:
- Value.uval = data.getU64(offset_ptr);
+ Value.uval = Data.getU64(OffsetPtr);
break;
case DW_FORM_GNU_addr_index:
case DW_FORM_GNU_str_index:
- Value.uval = data.getULEB128(offset_ptr);
+ Value.uval = Data.getULEB128(OffsetPtr);
break;
default:
// DWARFFormValue::skipValue() will have caught this and caused all
// DWARF DIEs to fail to be parsed, so this code is not be reachable.
llvm_unreachable("unsupported form");
}
- } while (indirect);
+ } while (Indirect);
- if (is_block) {
- StringRef str = data.getData().substr(*offset_ptr, Value.uval);
+ if (IsBlock) {
+ StringRef Str = Data.getData().substr(*OffsetPtr, Value.uval);
Value.data = nullptr;
- if (!str.empty()) {
- Value.data = reinterpret_cast<const uint8_t *>(str.data());
- *offset_ptr += Value.uval;
+ if (!Str.empty()) {
+ Value.data = reinterpret_cast<const uint8_t *>(Str.data());
+ *OffsetPtr += Value.uval;
}
}
return true;
}
-bool DWARFFormValue::skipValue(DataExtractor DebugInfoData,
- uint32_t *offset_ptr, const DWARFUnit *U) const {
- return DWARFFormValue::skipValue(Form, DebugInfoData, offset_ptr, U);
+bool DWARFFormValue::skipValue(DataExtractor DebugInfoData, uint32_t *OffsetPtr,
+ const DWARFUnit *U) const {
+ return DWARFFormValue::skipValue(Form, DebugInfoData, OffsetPtr, U);
}
-bool DWARFFormValue::skipValue(dwarf::Form form, DataExtractor DebugInfoData,
- uint32_t *offset_ptr, const DWARFUnit *U) {
- return skipFormValue(form, DebugInfoData, offset_ptr, U);
+bool DWARFFormValue::skipValue(dwarf::Form Form, DataExtractor DebugInfoData,
+ uint32_t *OffsetPtr, const DWARFUnit *U) {
+ return skipFormValue(Form, DebugInfoData, OffsetPtr, U);
}
-bool DWARFFormValue::skipValue(dwarf::Form form, DataExtractor DebugInfoData,
- uint32_t *offset_ptr, uint16_t Version,
+bool DWARFFormValue::skipValue(dwarf::Form Form, DataExtractor DebugInfoData,
+ uint32_t *OffsetPtr, uint16_t Version,
uint8_t AddrSize,
llvm::dwarf::DwarfFormat Format) {
FormSizeHelper FSH(Version, AddrSize, Format);
- return skipFormValue(form, DebugInfoData, offset_ptr, &FSH);
+ return skipFormValue(Form, DebugInfoData, OffsetPtr, &FSH);
}
-void
-DWARFFormValue::dump(raw_ostream &OS) const {
- uint64_t uvalue = Value.uval;
- bool cu_relative_offset = false;
+void DWARFFormValue::dump(raw_ostream &OS) const {
+ uint64_t UValue = Value.uval;
+ bool CURelativeOffset = false;
switch (Form) {
- case DW_FORM_addr: OS << format("0x%016" PRIx64, uvalue); break;
+ case DW_FORM_addr:
+ OS << format("0x%016" PRIx64, UValue);
+ break;
case DW_FORM_GNU_addr_index: {
- OS << format(" indexed (%8.8x) address = ", (uint32_t)uvalue);
+ OS << format(" indexed (%8.8x) address = ", (uint32_t)UValue);
uint64_t Address;
if (U == nullptr)
OS << "<invalid dwarf unit>";
- else if (U->getAddrOffsetSectionItem(uvalue, Address))
+ else if (U->getAddrOffsetSectionItem(UValue, Address))
OS << format("0x%016" PRIx64, Address);
else
OS << "<no .debug_addr section>";
break;
}
- case DW_FORM_flag_present: OS << "true"; break;
+ case DW_FORM_flag_present:
+ OS << "true";
+ break;
case DW_FORM_flag:
- case DW_FORM_data1: OS << format("0x%02x", (uint8_t)uvalue); break;
- case DW_FORM_data2: OS << format("0x%04x", (uint16_t)uvalue); break;
- case DW_FORM_data4: OS << format("0x%08x", (uint32_t)uvalue); break;
+ case DW_FORM_data1:
+ OS << format("0x%02x", (uint8_t)UValue);
+ break;
+ case DW_FORM_data2:
+ OS << format("0x%04x", (uint16_t)UValue);
+ break;
+ case DW_FORM_data4:
+ OS << format("0x%08x", (uint32_t)UValue);
+ break;
case DW_FORM_ref_sig8:
- case DW_FORM_data8: OS << format("0x%016" PRIx64, uvalue); break;
+ case DW_FORM_data8:
+ OS << format("0x%016" PRIx64, UValue);
+ break;
case DW_FORM_string:
OS << '"';
OS.write_escaped(Value.cstr);
@@ -492,80 +500,92 @@ DWARFFormValue::dump(raw_ostream &OS) const {
case DW_FORM_block1:
case DW_FORM_block2:
case DW_FORM_block4:
- if (uvalue > 0) {
+ if (UValue > 0) {
switch (Form) {
case DW_FORM_exprloc:
- case DW_FORM_block: OS << format("<0x%" PRIx64 "> ", uvalue); break;
- case DW_FORM_block1: OS << format("<0x%2.2x> ", (uint8_t)uvalue); break;
- case DW_FORM_block2: OS << format("<0x%4.4x> ", (uint16_t)uvalue); break;
- case DW_FORM_block4: OS << format("<0x%8.8x> ", (uint32_t)uvalue); break;
- default: break;
+ case DW_FORM_block:
+ OS << format("<0x%" PRIx64 "> ", UValue);
+ break;
+ case DW_FORM_block1:
+ OS << format("<0x%2.2x> ", (uint8_t)UValue);
+ break;
+ case DW_FORM_block2:
+ OS << format("<0x%4.4x> ", (uint16_t)UValue);
+ break;
+ case DW_FORM_block4:
+ OS << format("<0x%8.8x> ", (uint32_t)UValue);
+ break;
+ default:
+ break;
}
- const uint8_t* data_ptr = Value.data;
- if (data_ptr) {
- // uvalue contains size of block
- const uint8_t* end_data_ptr = data_ptr + uvalue;
- while (data_ptr < end_data_ptr) {
- OS << format("%2.2x ", *data_ptr);
- ++data_ptr;
+ const uint8_t *DataPtr = Value.data;
+ if (DataPtr) {
+ // UValue contains size of block
+ const uint8_t *EndDataPtr = DataPtr + UValue;
+ while (DataPtr < EndDataPtr) {
+ OS << format("%2.2x ", *DataPtr);
+ ++DataPtr;
}
- }
- else
+ } else
OS << "NULL";
}
break;
- case DW_FORM_sdata: OS << Value.sval; break;
- case DW_FORM_udata: OS << Value.uval; break;
+ case DW_FORM_sdata:
+ OS << Value.sval;
+ break;
+ case DW_FORM_udata:
+ OS << Value.uval;
+ break;
case DW_FORM_strp:
- OS << format(" .debug_str[0x%8.8x] = ", (uint32_t)uvalue);
+ OS << format(" .debug_str[0x%8.8x] = ", (uint32_t)UValue);
dumpString(OS);
break;
case DW_FORM_GNU_str_index:
- OS << format(" indexed (%8.8x) string = ", (uint32_t)uvalue);
+ OS << format(" indexed (%8.8x) string = ", (uint32_t)UValue);
dumpString(OS);
break;
case DW_FORM_GNU_strp_alt:
- OS << format("alt indirect string, offset: 0x%" PRIx64 "", uvalue);
+ OS << format("alt indirect string, offset: 0x%" PRIx64 "", UValue);
dumpString(OS);
break;
case DW_FORM_ref_addr:
- OS << format("0x%016" PRIx64, uvalue);
+ OS << format("0x%016" PRIx64, UValue);
break;
case DW_FORM_ref1:
- cu_relative_offset = true;
- OS << format("cu + 0x%2.2x", (uint8_t)uvalue);
+ CURelativeOffset = true;
+ OS << format("cu + 0x%2.2x", (uint8_t)UValue);
break;
case DW_FORM_ref2:
- cu_relative_offset = true;
- OS << format("cu + 0x%4.4x", (uint16_t)uvalue);
+ CURelativeOffset = true;
+ OS << format("cu + 0x%4.4x", (uint16_t)UValue);
break;
case DW_FORM_ref4:
- cu_relative_offset = true;
- OS << format("cu + 0x%4.4x", (uint32_t)uvalue);
+ CURelativeOffset = true;
+ OS << format("cu + 0x%4.4x", (uint32_t)UValue);
break;
case DW_FORM_ref8:
- cu_relative_offset = true;
- OS << format("cu + 0x%8.8" PRIx64, uvalue);
+ CURelativeOffset = true;
+ OS << format("cu + 0x%8.8" PRIx64, UValue);
break;
case DW_FORM_ref_udata:
- cu_relative_offset = true;
- OS << format("cu + 0x%" PRIx64, uvalue);
+ CURelativeOffset = true;
+ OS << format("cu + 0x%" PRIx64, UValue);
break;
case DW_FORM_GNU_ref_alt:
- OS << format("<alt 0x%" PRIx64 ">", uvalue);
+ OS << format("<alt 0x%" PRIx64 ">", UValue);
break;
- // All DW_FORM_indirect attributes should be resolved prior to calling
- // this function
+ // All DW_FORM_indirect attributes should be resolved prior to calling
+ // this function
case DW_FORM_indirect:
OS << "DW_FORM_indirect";
break;
- // Should be formatted to 64-bit for DWARF64.
+ // Should be formatted to 64-bit for DWARF64.
case DW_FORM_sec_offset:
- OS << format("0x%08x", (uint32_t)uvalue);
+ OS << format("0x%08x", (uint32_t)UValue);
break;
default:
@@ -573,10 +593,10 @@ DWARFFormValue::dump(raw_ostream &OS) const {
break;
}
- if (cu_relative_offset) {
+ if (CURelativeOffset) {
OS << " => {";
WithColor(OS, syntax::Address).get()
- << format("0x%8.8" PRIx64, uvalue + (U ? U->getOffset() : 0));
+ << format("0x%8.8" PRIx64, UValue + (U ? U->getOffset() : 0));
OS << "}";
}
}
@@ -653,15 +673,16 @@ Optional<uint64_t> DWARFFormValue::getAsSectionOffset() const {
}
Optional<uint64_t> DWARFFormValue::getAsUnsignedConstant() const {
- if ((!isFormClass(FC_Constant) && !isFormClass(FC_Flag))
- || Form == DW_FORM_sdata)
+ if ((!isFormClass(FC_Constant) && !isFormClass(FC_Flag)) ||
+ Form == DW_FORM_sdata)
return None;
return Value.uval;
}
Optional<int64_t> DWARFFormValue::getAsSignedConstant() const {
if ((!isFormClass(FC_Constant) && !isFormClass(FC_Flag)) ||
- (Form == DW_FORM_udata && uint64_t(std::numeric_limits<int64_t>::max()) < Value.uval))
+ (Form == DW_FORM_udata &&
+ uint64_t(std::numeric_limits<int64_t>::max()) < Value.uval))
return None;
switch (Form) {
case DW_FORM_data4:
diff --git a/contrib/llvm/lib/DebugInfo/PDB/Native/DbiModuleList.cpp b/contrib/llvm/lib/DebugInfo/PDB/Native/DbiModuleList.cpp
new file mode 100644
index 000000000000..434f775097e0
--- /dev/null
+++ b/contrib/llvm/lib/DebugInfo/PDB/Native/DbiModuleList.cpp
@@ -0,0 +1,273 @@
+//===- DbiModuleList.cpp - PDB module information list ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+#include "llvm/DebugInfo/PDB/Native/DbiModuleList.h"
+
+#include "llvm/DebugInfo/PDB/Native/RawError.h"
+#include "llvm/Support/Error.h"
+
+using namespace llvm;
+using namespace llvm::pdb;
+
+DbiModuleSourceFilesIterator::DbiModuleSourceFilesIterator(
+ const DbiModuleList &Modules, uint32_t Modi, uint16_t Filei)
+ : Modules(&Modules), Modi(Modi), Filei(Filei) {
+ setValue();
+}
+
+bool DbiModuleSourceFilesIterator::
+operator==(const DbiModuleSourceFilesIterator &R) const {
+ // incompatible iterators are never equal
+ if (!isCompatible(R))
+ return false;
+
+ // If they're compatible, and they're both ends, then they're equal.
+ if (isEnd() && R.isEnd())
+ return true;
+
+ // If one is an end and the other is not, they're not equal.
+ if (isEnd() != R.isEnd())
+ return false;
+
+ // Now we know:
+ // - They're compatible
+ // - They're not *both* end iterators
+ // - Their endness is the same.
+ // Thus, they're compatible iterators pointing to a valid file on the same
+ // module. All we need to check are the file indices.
+ assert(Modules == R.Modules);
+ assert(Modi == R.Modi);
+ assert(!isEnd());
+ assert(!R.isEnd());
+
+ return (Filei == R.Filei);
+}
+
+bool DbiModuleSourceFilesIterator::
+operator<(const DbiModuleSourceFilesIterator &R) const {
+ assert(isCompatible(R));
+
+ // It's not sufficient to compare the file indices, because default
+ // constructed iterators could be equal to iterators with valid indices. To
+ // account for this, early-out if they're equal.
+ if (*this == R)
+ return false;
+
+ return Filei < R.Filei;
+}
+
+std::ptrdiff_t DbiModuleSourceFilesIterator::
+operator-(const DbiModuleSourceFilesIterator &R) const {
+ assert(isCompatible(R));
+ assert(!(*this < R));
+
+ // If they're both end iterators, the distance is 0.
+ if (isEnd() && R.isEnd())
+ return 0;
+
+ assert(!R.isEnd());
+
+ // At this point, R cannot be end, but *this can, which means that *this
+ // might be a universal end iterator with none of its fields set. So in that
+ // case we have to rely on R as the authority to figure out how many files
+ // there are to compute the distance.
+ uint32_t Thisi = Filei;
+ if (isEnd()) {
+ uint32_t RealModi = R.Modi;
+ Thisi = R.Modules->getSourceFileCount(RealModi);
+ }
+
+ assert(Thisi >= R.Filei);
+ return Thisi - R.Filei;
+}
+
+DbiModuleSourceFilesIterator &DbiModuleSourceFilesIterator::
+operator+=(std::ptrdiff_t N) {
+ assert(!isEnd());
+
+ Filei += N;
+ assert(Filei <= Modules->getSourceFileCount(Modi));
+ setValue();
+ return *this;
+}
+
+DbiModuleSourceFilesIterator &DbiModuleSourceFilesIterator::
+operator-=(std::ptrdiff_t N) {
+ // Note that we can subtract from an end iterator, but not a universal end
+ // iterator.
+ assert(!isUniversalEnd());
+
+ assert(N <= Filei);
+
+ Filei -= N;
+ return *this;
+}
+
+void DbiModuleSourceFilesIterator::setValue() {
+ if (isEnd()) {
+ ThisValue = "";
+ return;
+ }
+
+ uint32_t Off = Modules->ModuleInitialFileIndex[Modi] + Filei;
+ auto ExpectedValue = Modules->getFileName(Off);
+ if (!ExpectedValue) {
+ consumeError(ExpectedValue.takeError());
+ Filei = Modules->getSourceFileCount(Modi);
+ } else
+ ThisValue = *ExpectedValue;
+}
+
+bool DbiModuleSourceFilesIterator::isEnd() const {
+ if (isUniversalEnd())
+ return true;
+
+ assert(Modules);
+ assert(Modi <= Modules->getModuleCount());
+ assert(Filei <= Modules->getSourceFileCount(Modi));
+
+ if (Modi == Modules->getModuleCount())
+ return true;
+ if (Filei == Modules->getSourceFileCount(Modi))
+ return true;
+ return false;
+}
+
+bool DbiModuleSourceFilesIterator::isUniversalEnd() const { return !Modules; }
+
+bool DbiModuleSourceFilesIterator::isCompatible(
+ const DbiModuleSourceFilesIterator &R) const {
+ // Universal iterators are compatible with any other iterator.
+ if (isUniversalEnd() || R.isUniversalEnd())
+ return true;
+
+ // At this point, neither iterator is a universal end iterator, although one
+ // or both might be non-universal end iterators. Regardless, the module index
+ // is valid, so they are compatible if and only if they refer to the same
+ // module.
+ return Modi == R.Modi;
+}
+
+Error DbiModuleList::initialize(BinaryStreamRef ModInfo,
+ BinaryStreamRef FileInfo) {
+ if (auto EC = initializeModInfo(ModInfo))
+ return EC;
+ if (auto EC = initializeFileInfo(FileInfo))
+ return EC;
+
+ return Error::success();
+}
+
+Error DbiModuleList::initializeModInfo(BinaryStreamRef ModInfo) {
+ ModInfoSubstream = ModInfo;
+
+ if (ModInfo.getLength() == 0)
+ return Error::success();
+
+ BinaryStreamReader Reader(ModInfo);
+
+ if (auto EC = Reader.readArray(Descriptors, ModInfo.getLength()))
+ return EC;
+
+ return Error::success();
+}
+
+Error DbiModuleList::initializeFileInfo(BinaryStreamRef FileInfo) {
+ FileInfoSubstream = FileInfo;
+
+ if (FileInfo.getLength() == 0)
+ return Error::success();
+
+ BinaryStreamReader FISR(FileInfo);
+ if (auto EC = FISR.readObject(FileInfoHeader))
+ return EC;
+
+ // First is an array of `NumModules` module indices. This does not seem to be
+ // used for anything meaningful, so we ignore it.
+ FixedStreamArray<support::ulittle16_t> ModuleIndices;
+ if (auto EC = FISR.readArray(ModuleIndices, FileInfoHeader->NumModules))
+ return EC;
+ if (auto EC = FISR.readArray(ModFileCountArray, FileInfoHeader->NumModules))
+ return EC;
+
+ // Compute the real number of source files. We can't trust the value in
+ // `FileInfoHeader->NumSourceFiles` because it is a uint16, and the sum of all
+ // source file counts might be larger than a uint16. So we compute the real
+ // count by summing up the individual counts.
+ uint32_t NumSourceFiles = 0;
+ for (auto Count : ModFileCountArray)
+ NumSourceFiles += Count;
+
+ // In the reference implementation, this array is where the pointer documented
+ // at the definition of ModuleInfoHeader::FileNameOffs points to. Note that
+ // although the field in ModuleInfoHeader is ignored this array is not, as it
+ // is the authority on where each filename begins in the names buffer.
+ if (auto EC = FISR.readArray(FileNameOffsets, NumSourceFiles))
+ return EC;
+
+ if (auto EC = FISR.readStreamRef(NamesBuffer))
+ return EC;
+
+ auto DescriptorIter = Descriptors.begin();
+ uint32_t NextFileIndex = 0;
+ ModuleInitialFileIndex.resize(FileInfoHeader->NumModules);
+ ModuleDescriptorOffsets.resize(FileInfoHeader->NumModules);
+ for (size_t I = 0; I < FileInfoHeader->NumModules; ++I) {
+ assert(DescriptorIter != Descriptors.end());
+ ModuleInitialFileIndex[I] = NextFileIndex;
+ ModuleDescriptorOffsets[I] = DescriptorIter.offset();
+
+ NextFileIndex += ModFileCountArray[I];
+ ++DescriptorIter;
+ }
+
+ assert(DescriptorIter == Descriptors.end());
+ assert(NextFileIndex == NumSourceFiles);
+
+ return Error::success();
+}
+
+uint32_t DbiModuleList::getModuleCount() const {
+ return FileInfoHeader->NumModules;
+}
+
+uint32_t DbiModuleList::getSourceFileCount() const {
+ return FileNameOffsets.size();
+}
+
+uint16_t DbiModuleList::getSourceFileCount(uint32_t Modi) const {
+ return ModFileCountArray[Modi];
+}
+
+DbiModuleDescriptor DbiModuleList::getModuleDescriptor(uint32_t Modi) const {
+ assert(Modi < getModuleCount());
+ uint32_t Offset = ModuleDescriptorOffsets[Modi];
+ auto Iter = Descriptors.at(Offset);
+ assert(Iter != Descriptors.end());
+ return *Iter;
+}
+
+iterator_range<DbiModuleSourceFilesIterator>
+DbiModuleList::source_files(uint32_t Modi) const {
+ return make_range<DbiModuleSourceFilesIterator>(
+ DbiModuleSourceFilesIterator(*this, Modi, 0),
+ DbiModuleSourceFilesIterator());
+}
+
+Expected<StringRef> DbiModuleList::getFileName(uint32_t Index) const {
+ BinaryStreamReader Names(NamesBuffer);
+ if (Index >= getSourceFileCount())
+ return make_error<RawError>(raw_error_code::index_out_of_bounds);
+
+ uint32_t FileOffset = FileNameOffsets[Index];
+ Names.setOffset(FileOffset);
+ StringRef Name;
+ if (auto EC = Names.readCString(Name))
+ return std::move(EC);
+ return Name;
+}
diff --git a/contrib/llvm/lib/DebugInfo/PDB/Native/DbiStream.cpp b/contrib/llvm/lib/DebugInfo/PDB/Native/DbiStream.cpp
index db703809f7c9..f7538c580ba4 100644
--- a/contrib/llvm/lib/DebugInfo/PDB/Native/DbiStream.cpp
+++ b/contrib/llvm/lib/DebugInfo/PDB/Native/DbiStream.cpp
@@ -107,11 +107,11 @@ Error DbiStream::reload() {
return make_error<RawError>(raw_error_code::corrupt_file,
"DBI type server substream not aligned.");
+ BinaryStreamRef ModInfoSubstream;
+ BinaryStreamRef FileInfoSubstream;
if (auto EC =
Reader.readStreamRef(ModInfoSubstream, Header->ModiSubstreamSize))
return EC;
- if (auto EC = initializeModInfoArray())
- return EC;
if (auto EC = Reader.readStreamRef(SecContrSubstream,
Header->SecContrSubstreamSize))
@@ -129,14 +129,15 @@ Error DbiStream::reload() {
DbgStreams, Header->OptionalDbgHdrSize / sizeof(ulittle16_t)))
return EC;
+ if (auto EC = Modules.initialize(ModInfoSubstream, FileInfoSubstream))
+ return EC;
+
if (auto EC = initializeSectionContributionData())
return EC;
if (auto EC = initializeSectionHeadersData())
return EC;
if (auto EC = initializeSectionMapData())
return EC;
- if (auto EC = initializeFileInfo())
- return EC;
if (auto EC = initializeFpoRecords())
return EC;
@@ -215,7 +216,8 @@ FixedStreamArray<object::FpoData> DbiStream::getFpoRecords() {
return FpoRecords;
}
-ArrayRef<ModuleInfoEx> DbiStream::modules() const { return ModuleInfos; }
+const DbiModuleList &DbiStream::modules() const { return Modules; }
+
FixedStreamArray<SecMapEntry> DbiStream::getSectionMap() const {
return SectionMap;
}
@@ -248,25 +250,6 @@ Error DbiStream::initializeSectionContributionData() {
"Unsupported DBI Section Contribution version");
}
-Error DbiStream::initializeModInfoArray() {
- if (ModInfoSubstream.getLength() == 0)
- return Error::success();
-
- // Since each DbiModuleDescriptor in the stream is a variable length, we have
- // to iterate
- // them to know how many there actually are.
- BinaryStreamReader Reader(ModInfoSubstream);
-
- VarStreamArray<DbiModuleDescriptor> ModInfoArray;
- if (auto EC = Reader.readArray(ModInfoArray, ModInfoSubstream.getLength()))
- return EC;
- for (auto &Info : ModInfoArray) {
- ModuleInfos.emplace_back(Info);
- }
-
- return Error::success();
-}
-
// Initializes this->SectionHeaders.
Error DbiStream::initializeSectionHeadersData() {
if (DbgStreams.size() == 0)
@@ -338,90 +321,9 @@ Error DbiStream::initializeSectionMapData() {
return Error::success();
}
-Error DbiStream::initializeFileInfo() {
- if (FileInfoSubstream.getLength() == 0)
- return Error::success();
-
- const FileInfoSubstreamHeader *FH;
- BinaryStreamReader FISR(FileInfoSubstream);
- if (auto EC = FISR.readObject(FH))
- return EC;
-
- // The number of modules in the stream should be the same as reported by
- // the FileInfoSubstreamHeader.
- if (FH->NumModules != ModuleInfos.size())
- return make_error<RawError>(raw_error_code::corrupt_file,
- "FileInfo substream count doesn't match DBI.");
-
- FixedStreamArray<ulittle16_t> ModIndexArray;
- FixedStreamArray<ulittle16_t> ModFileCountArray;
-
- // First is an array of `NumModules` module indices. This is not used for the
- // same reason that `NumSourceFiles` is not used. It's an array of uint16's,
- // but it's possible there are more than 64k source files, which would imply
- // more than 64k modules (e.g. object files) as well. So we ignore this
- // field.
- if (auto EC = FISR.readArray(ModIndexArray, ModuleInfos.size()))
- return EC;
- if (auto EC = FISR.readArray(ModFileCountArray, ModuleInfos.size()))
- return EC;
-
- // Compute the real number of source files.
- uint32_t NumSourceFiles = 0;
- for (auto Count : ModFileCountArray)
- NumSourceFiles += Count;
-
- // This is the array that in the reference implementation corresponds to
- // `DbiModuleDescriptor::FileLayout::FileNameOffs`, which is commented there
- // as being a
- // pointer. Due to the mentioned problems of pointers causing difficulty
- // when reading from the file on 64-bit systems, we continue to ignore that
- // field in `DbiModuleDescriptor`, and instead build a vector of StringRefs
- // and stores
- // them in `ModuleInfoEx`. The value written to and read from the file is
- // not used anyway, it is only there as a way to store the offsets for the
- // purposes of later accessing the names at runtime.
- if (auto EC = FISR.readArray(FileNameOffsets, NumSourceFiles))
- return EC;
-
- if (auto EC = FISR.readStreamRef(NamesBuffer))
- return EC;
-
- // We go through each ModuleInfo, determine the number N of source files for
- // that module, and then get the next N offsets from the Offsets array, using
- // them to get the corresponding N names from the Names buffer and associating
- // each one with the corresponding module.
- uint32_t NextFileIndex = 0;
- for (size_t I = 0; I < ModuleInfos.size(); ++I) {
- uint32_t NumFiles = ModFileCountArray[I];
- ModuleInfos[I].SourceFiles.resize(NumFiles);
- for (size_t J = 0; J < NumFiles; ++J, ++NextFileIndex) {
- auto ThisName = getFileNameForIndex(NextFileIndex);
- if (!ThisName)
- return ThisName.takeError();
- ModuleInfos[I].SourceFiles[J] = *ThisName;
- }
- }
-
- return Error::success();
-}
-
uint32_t DbiStream::getDebugStreamIndex(DbgHeaderType Type) const {
uint16_t T = static_cast<uint16_t>(Type);
if (T >= DbgStreams.size())
return kInvalidStreamIndex;
return DbgStreams[T];
}
-
-Expected<StringRef> DbiStream::getFileNameForIndex(uint32_t Index) const {
- BinaryStreamReader Names(NamesBuffer);
- if (Index >= FileNameOffsets.size())
- return make_error<RawError>(raw_error_code::index_out_of_bounds);
-
- uint32_t FileOffset = FileNameOffsets[Index];
- Names.setOffset(FileOffset);
- StringRef Name;
- if (auto EC = Names.readCString(Name))
- return std::move(EC);
- return Name;
-}
diff --git a/contrib/llvm/lib/DebugInfo/PDB/Native/NativeCompilandSymbol.cpp b/contrib/llvm/lib/DebugInfo/PDB/Native/NativeCompilandSymbol.cpp
index 9c0cc0bf8233..77f832582f82 100644
--- a/contrib/llvm/lib/DebugInfo/PDB/Native/NativeCompilandSymbol.cpp
+++ b/contrib/llvm/lib/DebugInfo/PDB/Native/NativeCompilandSymbol.cpp
@@ -13,7 +13,7 @@ namespace llvm {
namespace pdb {
NativeCompilandSymbol::NativeCompilandSymbol(NativeSession &Session,
- const ModuleInfoEx &MI)
+ DbiModuleDescriptor MI)
: NativeRawSymbol(Session), Module(MI) {}
PDB_SymType NativeCompilandSymbol::getSymTag() const {
@@ -21,7 +21,7 @@ PDB_SymType NativeCompilandSymbol::getSymTag() const {
}
bool NativeCompilandSymbol::isEditAndContinueEnabled() const {
- return Module.Info.hasECInfo();
+ return Module.hasECInfo();
}
uint32_t NativeCompilandSymbol::getLexicalParentId() const { return 0; }
@@ -32,11 +32,11 @@ uint32_t NativeCompilandSymbol::getLexicalParentId() const { return 0; }
// this potential confusion.
std::string NativeCompilandSymbol::getLibraryName() const {
- return Module.Info.getObjFileName();
+ return Module.getObjFileName();
}
std::string NativeCompilandSymbol::getName() const {
- return Module.Info.getModuleName();
+ return Module.getModuleName();
}
} // namespace pdb
diff --git a/contrib/llvm/lib/DebugInfo/PDB/Native/NativeEnumModules.cpp b/contrib/llvm/lib/DebugInfo/PDB/Native/NativeEnumModules.cpp
index 7532110d005c..97319fd77d11 100644
--- a/contrib/llvm/lib/DebugInfo/PDB/Native/NativeEnumModules.cpp
+++ b/contrib/llvm/lib/DebugInfo/PDB/Native/NativeEnumModules.cpp
@@ -10,6 +10,7 @@
#include "llvm/DebugInfo/PDB/Native/NativeEnumModules.h"
#include "llvm/DebugInfo/PDB/IPDBEnumChildren.h"
+#include "llvm/DebugInfo/PDB/Native/DbiModuleList.h"
#include "llvm/DebugInfo/PDB/Native/NativeCompilandSymbol.h"
#include "llvm/DebugInfo/PDB/Native/NativeSession.h"
#include "llvm/DebugInfo/PDB/PDBSymbol.h"
@@ -19,25 +20,25 @@ namespace llvm {
namespace pdb {
NativeEnumModules::NativeEnumModules(NativeSession &PDBSession,
- ArrayRef<ModuleInfoEx> Modules,
+ const DbiModuleList &Modules,
uint32_t Index)
: Session(PDBSession), Modules(Modules), Index(Index) {}
uint32_t NativeEnumModules::getChildCount() const {
- return static_cast<uint32_t>(Modules.size());
+ return static_cast<uint32_t>(Modules.getModuleCount());
}
std::unique_ptr<PDBSymbol>
NativeEnumModules::getChildAtIndex(uint32_t Index) const {
- if (Index >= Modules.size())
+ if (Index >= Modules.getModuleCount())
return nullptr;
- return std::unique_ptr<PDBSymbol>(new PDBSymbolCompiland(Session,
- std::unique_ptr<IPDBRawSymbol>(
- new NativeCompilandSymbol(Session, Modules[Index]))));
+ return std::unique_ptr<PDBSymbol>(new PDBSymbolCompiland(
+ Session, std::unique_ptr<IPDBRawSymbol>(new NativeCompilandSymbol(
+ Session, Modules.getModuleDescriptor(Index)))));
}
std::unique_ptr<PDBSymbol> NativeEnumModules::getNext() {
- if (Index >= Modules.size())
+ if (Index >= Modules.getModuleCount())
return nullptr;
return getChildAtIndex(Index++);
}
diff --git a/contrib/llvm/lib/DebugInfo/PDB/Native/NativeExeSymbol.cpp b/contrib/llvm/lib/DebugInfo/PDB/Native/NativeExeSymbol.cpp
index ec2a4b87457c..bb52560be167 100644
--- a/contrib/llvm/lib/DebugInfo/PDB/Native/NativeExeSymbol.cpp
+++ b/contrib/llvm/lib/DebugInfo/PDB/Native/NativeExeSymbol.cpp
@@ -26,7 +26,7 @@ NativeExeSymbol::findChildren(PDB_SymType Type) const {
case PDB_SymType::Compiland: {
auto Dbi = File.getPDBDbiStream();
if (Dbi) {
- const auto Modules = Dbi->modules();
+ const DbiModuleList &Modules = Dbi->modules();
return std::unique_ptr<IPDBEnumSymbols>(
new NativeEnumModules(Session, Modules));
}
diff --git a/contrib/llvm/lib/DebugInfo/PDB/Native/TpiStream.cpp b/contrib/llvm/lib/DebugInfo/PDB/Native/TpiStream.cpp
index 5fef3edf8c2d..c0999d93dbb9 100644
--- a/contrib/llvm/lib/DebugInfo/PDB/Native/TpiStream.cpp
+++ b/contrib/llvm/lib/DebugInfo/PDB/Native/TpiStream.cpp
@@ -39,20 +39,6 @@ TpiStream::TpiStream(const PDBFile &File,
TpiStream::~TpiStream() = default;
-// Verifies that a given type record matches with a given hash value.
-// Currently we only verify SRC_LINE records.
-Error TpiStream::verifyHashValues() {
- TpiHashVerifier Verifier(HashValues, Header->NumHashBuckets);
- TypeDeserializer Deserializer;
-
- TypeVisitorCallbackPipeline Pipeline;
- Pipeline.addCallbackToPipeline(Deserializer);
- Pipeline.addCallbackToPipeline(Verifier);
-
- CVTypeVisitor Visitor(Pipeline);
- return Visitor.visitTypeStream(TypeRecords);
-}
-
Error TpiStream::reload() {
BinaryStreamReader Reader(*Stream);
@@ -98,7 +84,7 @@ Error TpiStream::reload() {
// There should be a hash value for every type record, or no hashes at all.
uint32_t NumHashValues =
Header->HashValueBuffer.Length / sizeof(ulittle32_t);
- if (NumHashValues != NumTypeRecords() && NumHashValues != 0)
+ if (NumHashValues != getNumTypeRecords() && NumHashValues != 0)
return make_error<RawError>(
raw_error_code::corrupt_file,
"TPI hash count does not match with the number of type records.");
@@ -122,12 +108,6 @@ Error TpiStream::reload() {
}
HashStream = std::move(HS);
-
- // TPI hash table is a parallel array for the type records.
- // Verify that the hash values match with type records.
- if (NumHashValues > 0)
- if (auto EC = verifyHashValues())
- return EC;
}
return Error::success();
@@ -142,7 +122,7 @@ uint32_t TpiStream::TypeIndexBegin() const { return Header->TypeIndexBegin; }
uint32_t TpiStream::TypeIndexEnd() const { return Header->TypeIndexEnd; }
-uint32_t TpiStream::NumTypeRecords() const {
+uint32_t TpiStream::getNumTypeRecords() const {
return TypeIndexEnd() - TypeIndexBegin();
}
@@ -154,7 +134,7 @@ uint16_t TpiStream::getTypeHashStreamAuxIndex() const {
return Header->HashAuxStreamIndex;
}
-uint32_t TpiStream::NumHashBuckets() const { return Header->NumHashBuckets; }
+uint32_t TpiStream::getNumHashBuckets() const { return Header->NumHashBuckets; }
uint32_t TpiStream::getHashKeySize() const { return Header->HashKeySize; }
FixedStreamArray<support::ulittle32_t> TpiStream::getHashValues() const {
diff --git a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldChecker.cpp b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldChecker.cpp
index 7bfa79445584..e45fdc7aee18 100644
--- a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldChecker.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldChecker.cpp
@@ -861,6 +861,15 @@ RuntimeDyldCheckerImpl::getSubsectionStartingAt(StringRef Name) const {
SymInfo.getOffset());
}
+Optional<uint64_t>
+RuntimeDyldCheckerImpl::getSectionLoadAddress(void *LocalAddress) const {
+ for (auto &S : getRTDyld().Sections) {
+ if (S.getAddress() == LocalAddress)
+ return S.getLoadAddress();
+ }
+ return Optional<uint64_t>();
+}
+
void RuntimeDyldCheckerImpl::registerSection(
StringRef FilePath, unsigned SectionID) {
StringRef FileName = sys::path::filename(FilePath);
@@ -935,3 +944,8 @@ RuntimeDyldChecker::getSectionAddr(StringRef FileName, StringRef SectionName,
bool LocalAddress) {
return Impl->getSectionAddr(FileName, SectionName, LocalAddress);
}
+
+Optional<uint64_t>
+RuntimeDyldChecker::getSectionLoadAddress(void *LocalAddress) const {
+ return Impl->getSectionLoadAddress(LocalAddress);
+}
diff --git a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCheckerImpl.h b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCheckerImpl.h
index b7263be09934..b462ef2c00ce 100644
--- a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCheckerImpl.h
+++ b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCheckerImpl.h
@@ -60,6 +60,8 @@ private:
bool IsInsideLoad) const;
StringRef getSubsectionStartingAt(StringRef Name) const;
+ Optional<uint64_t> getSectionLoadAddress(void *LocalAddr) const;
+
void registerSection(StringRef FilePath, unsigned SectionID);
void registerStubMap(StringRef FilePath, unsigned SectionID,
const RuntimeDyldImpl::StubMap &RTDyldStubs);
diff --git a/contrib/llvm/lib/IR/ConstantRange.cpp b/contrib/llvm/lib/IR/ConstantRange.cpp
index 5425676e4edc..aeb1257754f3 100644
--- a/contrib/llvm/lib/IR/ConstantRange.cpp
+++ b/contrib/llvm/lib/IR/ConstantRange.cpp
@@ -251,7 +251,7 @@ APInt ConstantRange::getSetSize() const {
}
bool
-ConstantRange::isSizeStrictlySmallerThanOf(const ConstantRange &Other) const {
+ConstantRange::isSizeStrictlySmallerThan(const ConstantRange &Other) const {
assert(getBitWidth() == Other.getBitWidth());
if (isFullSet())
return false;
@@ -260,6 +260,17 @@ ConstantRange::isSizeStrictlySmallerThanOf(const ConstantRange &Other) const {
return (Upper - Lower).ult(Other.Upper - Other.Lower);
}
+bool
+ConstantRange::isSizeLargerThan(uint64_t MaxSize) const {
+ assert(MaxSize && "MaxSize can't be 0.");
+ // If this a full set, we need special handling to avoid needing an extra bit
+ // to represent the size.
+ if (isFullSet())
+ return APInt::getMaxValue(getBitWidth()).ugt(MaxSize - 1);
+
+ return (Upper - Lower).ugt(MaxSize);
+}
+
APInt ConstantRange::getUnsignedMax() const {
if (isFullSet() || isWrappedSet())
return APInt::getMaxValue(getBitWidth());
@@ -374,7 +385,7 @@ ConstantRange ConstantRange::intersectWith(const ConstantRange &CR) const {
if (CR.Upper.ule(Lower))
return ConstantRange(CR.Lower, Upper);
- if (isSizeStrictlySmallerThanOf(CR))
+ if (isSizeStrictlySmallerThan(CR))
return *this;
return CR;
}
@@ -389,7 +400,7 @@ ConstantRange ConstantRange::intersectWith(const ConstantRange &CR) const {
if (CR.Upper.ult(Upper)) {
if (CR.Lower.ult(Upper)) {
- if (isSizeStrictlySmallerThanOf(CR))
+ if (isSizeStrictlySmallerThan(CR))
return *this;
return CR;
}
@@ -405,7 +416,7 @@ ConstantRange ConstantRange::intersectWith(const ConstantRange &CR) const {
return ConstantRange(CR.Lower, Upper);
}
- if (isSizeStrictlySmallerThanOf(CR))
+ if (isSizeStrictlySmallerThan(CR))
return *this;
return CR;
}
@@ -676,8 +687,8 @@ ConstantRange::add(const ConstantRange &Other) const {
return ConstantRange(getBitWidth(), /*isFullSet=*/true);
ConstantRange X = ConstantRange(std::move(NewLower), std::move(NewUpper));
- if (X.isSizeStrictlySmallerThanOf(*this) ||
- X.isSizeStrictlySmallerThanOf(Other))
+ if (X.isSizeStrictlySmallerThan(*this) ||
+ X.isSizeStrictlySmallerThan(Other))
// We've wrapped, therefore, full set.
return ConstantRange(getBitWidth(), /*isFullSet=*/true);
return X;
@@ -709,8 +720,8 @@ ConstantRange::sub(const ConstantRange &Other) const {
return ConstantRange(getBitWidth(), /*isFullSet=*/true);
ConstantRange X = ConstantRange(std::move(NewLower), std::move(NewUpper));
- if (X.isSizeStrictlySmallerThanOf(*this) ||
- X.isSizeStrictlySmallerThanOf(Other))
+ if (X.isSizeStrictlySmallerThan(*this) ||
+ X.isSizeStrictlySmallerThan(Other))
// We've wrapped, therefore, full set.
return ConstantRange(getBitWidth(), /*isFullSet=*/true);
return X;
@@ -766,7 +777,7 @@ ConstantRange::multiply(const ConstantRange &Other) const {
ConstantRange Result_sext(std::min(L, Compare), std::max(L, Compare) + 1);
ConstantRange SR = Result_sext.truncate(getBitWidth());
- return UR.isSizeStrictlySmallerThanOf(SR) ? UR : SR;
+ return UR.isSizeStrictlySmallerThan(SR) ? UR : SR;
}
ConstantRange
diff --git a/contrib/llvm/lib/IR/DataLayout.cpp b/contrib/llvm/lib/IR/DataLayout.cpp
index 93bacdd2e80f..c117d29b7f69 100644
--- a/contrib/llvm/lib/IR/DataLayout.cpp
+++ b/contrib/llvm/lib/IR/DataLayout.cpp
@@ -1,4 +1,4 @@
-//===-- DataLayout.cpp - Data size & alignment routines --------------------==//
+//===- DataLayout.cpp - Data size & alignment routines ---------------------==//
//
// The LLVM Compiler Infrastructure
//
@@ -16,21 +16,27 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/IR/DataLayout.h"
#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/IR/Constants.h"
+#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
+#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Module.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/MathExtras.h"
-#include "llvm/Support/Mutex.h"
-#include "llvm/Support/raw_ostream.h"
#include <algorithm>
+#include <cassert>
+#include <cstdint>
#include <cstdlib>
+#include <tuple>
+#include <utility>
+
using namespace llvm;
//===----------------------------------------------------------------------===//
@@ -73,7 +79,6 @@ StructLayout::StructLayout(StructType *ST, const DataLayout &DL) {
}
}
-
/// getElementContainingOffset - Given a valid offset into the structure,
/// return the structure index that contains it.
unsigned StructLayout::getElementContainingOffset(uint64_t Offset) const {
@@ -338,7 +343,7 @@ void DataLayout::parseSpecifier(StringRef Desc) {
break;
}
case 'n': // Native integer types.
- for (;;) {
+ while (true) {
unsigned Width = getInt(Tok);
if (Width == 0)
report_fatal_error(
@@ -393,7 +398,7 @@ void DataLayout::parseSpecifier(StringRef Desc) {
}
}
-DataLayout::DataLayout(const Module *M) : LayoutMap(nullptr) {
+DataLayout::DataLayout(const Module *M) {
init(M);
}
@@ -522,7 +527,7 @@ unsigned DataLayout::getAlignmentInfo(AlignTypeEnum AlignType,
namespace {
class StructLayoutMap {
- typedef DenseMap<StructType*, StructLayout*> LayoutInfoTy;
+ using LayoutInfoTy = DenseMap<StructType*, StructLayout*>;
LayoutInfoTy LayoutInfo;
public:
@@ -577,7 +582,6 @@ const StructLayout *DataLayout::getStructLayout(StructType *Ty) const {
return L;
}
-
unsigned DataLayout::getPointerABIAlignment(unsigned AS) const {
PointersTy::const_iterator I = findPointerLowerBound(AS);
if (I == Pointers.end() || I->AddressSpace != AS) {
@@ -778,4 +782,3 @@ unsigned DataLayout::getPreferredAlignment(const GlobalVariable *GV) const {
unsigned DataLayout::getPreferredAlignmentLog(const GlobalVariable *GV) const {
return Log2_32(getPreferredAlignment(GV));
}
-
diff --git a/contrib/llvm/lib/IR/DebugInfo.cpp b/contrib/llvm/lib/IR/DebugInfo.cpp
index c5d39c544304..ca3828420a72 100644
--- a/contrib/llvm/lib/IR/DebugInfo.cpp
+++ b/contrib/llvm/lib/IR/DebugInfo.cpp
@@ -1,4 +1,4 @@
-//===--- DebugInfo.cpp - Debug Information Helper Classes -----------------===//
+//===- DebugInfo.cpp - Debug Information Helper Classes -------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -12,22 +12,29 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/IR/DebugInfo.h"
-#include "LLVMContextImpl.h"
-#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/None.h"
#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
-#include "llvm/IR/DIBuilder.h"
-#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/DebugInfo.h"
+#include "llvm/IR/DebugInfoMetadata.h"
+#include "llvm/IR/DebugLoc.h"
+#include "llvm/IR/Function.h"
#include "llvm/IR/GVMaterializer.h"
-#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Instruction.h"
#include "llvm/IR/IntrinsicInst.h"
-#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
-#include "llvm/IR/ValueHandle.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/Dwarf.h"
-#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/Casting.h"
+#include <algorithm>
+#include <cassert>
+#include <utility>
+
using namespace llvm;
using namespace llvm::dwarf;
@@ -249,7 +256,7 @@ bool DebugInfoFinder::addScope(DIScope *Scope) {
return true;
}
-static llvm::MDNode *stripDebugLocFromLoopID(llvm::MDNode *N) {
+static MDNode *stripDebugLocFromLoopID(MDNode *N) {
assert(N->op_begin() != N->op_end() && "Missing self reference?");
// if there is no debug location, we do not have to rewrite this MDNode.
@@ -288,7 +295,7 @@ bool llvm::stripDebugInfo(Function &F) {
F.setSubprogram(nullptr);
}
- llvm::DenseMap<llvm::MDNode*, llvm::MDNode*> LoopIDsMap;
+ DenseMap<MDNode*, MDNode*> LoopIDsMap;
for (BasicBlock &BB : F) {
for (auto II = BB.begin(), End = BB.end(); II != End;) {
Instruction &I = *II++; // We may delete the instruction, increment now.
@@ -525,7 +532,7 @@ private:
void traverse(MDNode *);
};
-} // Anonymous namespace.
+} // end anonymous namespace
void DebugTypeInfoRemoval::traverse(MDNode *N) {
if (!N || Replacements.count(N))
@@ -590,7 +597,7 @@ bool llvm::stripNonLineTableDebugInfo(Module &M) {
GV.eraseMetadata(LLVMContext::MD_dbg);
DebugTypeInfoRemoval Mapper(M.getContext());
- auto remap = [&](llvm::MDNode *Node) -> llvm::MDNode * {
+ auto remap = [&](MDNode *Node) -> MDNode * {
if (!Node)
return nullptr;
Mapper.traverseAndRemap(Node);
diff --git a/contrib/llvm/lib/IR/Instruction.cpp b/contrib/llvm/lib/IR/Instruction.cpp
index c26699eab4e2..906a28a5c887 100644
--- a/contrib/llvm/lib/IR/Instruction.cpp
+++ b/contrib/llvm/lib/IR/Instruction.cpp
@@ -625,20 +625,41 @@ void Instruction::updateProfWeight(uint64_t S, uint64_t T) {
return;
auto *ProfDataName = dyn_cast<MDString>(ProfileData->getOperand(0));
- if (!ProfDataName || !ProfDataName->getString().equals("branch_weights"))
+ if (!ProfDataName || (!ProfDataName->getString().equals("branch_weights") &&
+ !ProfDataName->getString().equals("VP")))
return;
- SmallVector<uint32_t, 4> Weights;
- for (unsigned i = 1; i < ProfileData->getNumOperands(); i++) {
- // Using APInt::div may be expensive, but most cases should fit in 64 bits.
- APInt Val(128, mdconst::dyn_extract<ConstantInt>(ProfileData->getOperand(i))
- ->getValue()
- .getZExtValue());
- Val *= APInt(128, S);
- Weights.push_back(Val.udiv(APInt(128, T)).getLimitedValue());
- }
MDBuilder MDB(getContext());
- setMetadata(LLVMContext::MD_prof, MDB.createBranchWeights(Weights));
+ SmallVector<Metadata *, 3> Vals;
+ Vals.push_back(ProfileData->getOperand(0));
+ APInt APS(128, S), APT(128, T);
+ if (ProfDataName->getString().equals("branch_weights"))
+ for (unsigned i = 1; i < ProfileData->getNumOperands(); i++) {
+ // Using APInt::div may be expensive, but most cases should fit 64 bits.
+ APInt Val(128,
+ mdconst::dyn_extract<ConstantInt>(ProfileData->getOperand(i))
+ ->getValue()
+ .getZExtValue());
+ Val *= APS;
+ Vals.push_back(MDB.createConstant(
+ ConstantInt::get(Type::getInt64Ty(getContext()),
+ Val.udiv(APT).getLimitedValue())));
+ }
+ else if (ProfDataName->getString().equals("VP"))
+ for (unsigned i = 1; i < ProfileData->getNumOperands(); i += 2) {
+ // The first value is the key of the value profile, which will not change.
+ Vals.push_back(ProfileData->getOperand(i));
+ // Using APInt::div may be expensive, but most cases should fit 64 bits.
+ APInt Val(128,
+ mdconst::dyn_extract<ConstantInt>(ProfileData->getOperand(i + 1))
+ ->getValue()
+ .getZExtValue());
+ Val *= APS;
+ Vals.push_back(MDB.createConstant(
+ ConstantInt::get(Type::getInt64Ty(getContext()),
+ Val.udiv(APT).getLimitedValue())));
+ }
+ setMetadata(LLVMContext::MD_prof, MDNode::get(getContext(), Vals));
}
void Instruction::setProfWeight(uint64_t W) {
diff --git a/contrib/llvm/lib/IR/ModuleSummaryIndex.cpp b/contrib/llvm/lib/IR/ModuleSummaryIndex.cpp
index 01e1b8168afa..9dd712f9ca13 100644
--- a/contrib/llvm/lib/IR/ModuleSummaryIndex.cpp
+++ b/contrib/llvm/lib/IR/ModuleSummaryIndex.cpp
@@ -22,7 +22,7 @@ void ModuleSummaryIndex::collectDefinedFunctionsForModule(
StringRef ModulePath, GVSummaryMapTy &GVSummaryMap) const {
for (auto &GlobalList : *this) {
auto GUID = GlobalList.first;
- for (auto &GlobSummary : GlobalList.second) {
+ for (auto &GlobSummary : GlobalList.second.SummaryList) {
auto *Summary = dyn_cast_or_null<FunctionSummary>(GlobSummary.get());
if (!Summary)
// Ignore global variable, focus on functions
@@ -40,7 +40,7 @@ void ModuleSummaryIndex::collectDefinedGVSummariesPerModule(
StringMap<GVSummaryMapTy> &ModuleToDefinedGVSummaries) const {
for (auto &GlobalList : *this) {
auto GUID = GlobalList.first;
- for (auto &Summary : GlobalList.second) {
+ for (auto &Summary : GlobalList.second.SummaryList) {
ModuleToDefinedGVSummaries[Summary->modulePath()][GUID] = Summary.get();
}
}
@@ -49,10 +49,10 @@ void ModuleSummaryIndex::collectDefinedGVSummariesPerModule(
GlobalValueSummary *
ModuleSummaryIndex::getGlobalValueSummary(uint64_t ValueGUID,
bool PerModuleIndex) const {
- auto SummaryList = findGlobalValueSummaryList(ValueGUID);
- assert(SummaryList != end() && "GlobalValue not found in index");
- assert((!PerModuleIndex || SummaryList->second.size() == 1) &&
+ auto VI = getValueInfo(ValueGUID);
+ assert(VI && "GlobalValue not found in index");
+ assert((!PerModuleIndex || VI.getSummaryList().size() == 1) &&
"Expected a single entry per global value in per-module index");
- auto &Summary = SummaryList->second[0];
+ auto &Summary = VI.getSummaryList()[0];
return Summary.get();
}
diff --git a/contrib/llvm/lib/LTO/LTO.cpp b/contrib/llvm/lib/LTO/LTO.cpp
index 0afa1ba6ecd6..2d2dcdec05fb 100644
--- a/contrib/llvm/lib/LTO/LTO.cpp
+++ b/contrib/llvm/lib/LTO/LTO.cpp
@@ -274,13 +274,14 @@ void llvm::thinLTOResolveWeakForLinkerInIndex(
// when needed.
DenseSet<GlobalValueSummary *> GlobalInvolvedWithAlias;
for (auto &I : Index)
- for (auto &S : I.second)
+ for (auto &S : I.second.SummaryList)
if (auto AS = dyn_cast<AliasSummary>(S.get()))
GlobalInvolvedWithAlias.insert(&AS->getAliasee());
for (auto &I : Index)
- thinLTOResolveWeakForLinkerGUID(I.second, I.first, GlobalInvolvedWithAlias,
- isPrevailing, recordNewLinkage);
+ thinLTOResolveWeakForLinkerGUID(I.second.SummaryList, I.first,
+ GlobalInvolvedWithAlias, isPrevailing,
+ recordNewLinkage);
}
static void thinLTOInternalizeAndPromoteGUID(
@@ -301,7 +302,7 @@ void llvm::thinLTOInternalizeAndPromoteInIndex(
ModuleSummaryIndex &Index,
function_ref<bool(StringRef, GlobalValue::GUID)> isExported) {
for (auto &I : Index)
- thinLTOInternalizeAndPromoteGUID(I.second, I.first, isExported);
+ thinLTOInternalizeAndPromoteGUID(I.second.SummaryList, I.first, isExported);
}
// Requires a destructor for std::vector<InputModule>.
diff --git a/contrib/llvm/lib/LTO/ThinLTOCodeGenerator.cpp b/contrib/llvm/lib/LTO/ThinLTOCodeGenerator.cpp
index 440275c34258..b4ee7c2b2fbc 100644
--- a/contrib/llvm/lib/LTO/ThinLTOCodeGenerator.cpp
+++ b/contrib/llvm/lib/LTO/ThinLTOCodeGenerator.cpp
@@ -119,8 +119,9 @@ static void computePrevailingCopies(
};
for (auto &I : Index) {
- if (HasMultipleCopies(I.second))
- PrevailingCopy[I.first] = getFirstDefinitionForLinker(I.second);
+ if (HasMultipleCopies(I.second.SummaryList))
+ PrevailingCopy[I.first] =
+ getFirstDefinitionForLinker(I.second.SummaryList);
}
}
diff --git a/contrib/llvm/lib/MC/ConstantPools.cpp b/contrib/llvm/lib/MC/ConstantPools.cpp
index 8c94e2780998..ca5440237e49 100644
--- a/contrib/llvm/lib/MC/ConstantPools.cpp
+++ b/contrib/llvm/lib/MC/ConstantPools.cpp
@@ -57,6 +57,10 @@ const MCExpr *ConstantPool::addEntry(const MCExpr *Value, MCContext &Context,
bool ConstantPool::empty() { return Entries.empty(); }
+void ConstantPool::clearCache() {
+ CachedEntries.clear();
+}
+
//
// AssemblerConstantPools implementation
//
@@ -98,6 +102,13 @@ void AssemblerConstantPools::emitForCurrentSection(MCStreamer &Streamer) {
}
}
+void AssemblerConstantPools::clearCacheForCurrentSection(MCStreamer &Streamer) {
+ MCSection *Section = Streamer.getCurrentSectionOnly();
+ if (ConstantPool *CP = getConstantPool(Section)) {
+ CP->clearCache();
+ }
+}
+
const MCExpr *AssemblerConstantPools::addEntry(MCStreamer &Streamer,
const MCExpr *Expr,
unsigned Size, SMLoc Loc) {
diff --git a/contrib/llvm/lib/MC/MCParser/AsmParser.cpp b/contrib/llvm/lib/MC/MCParser/AsmParser.cpp
index f36a21bf1121..66ba853da2fe 100644
--- a/contrib/llvm/lib/MC/MCParser/AsmParser.cpp
+++ b/contrib/llvm/lib/MC/MCParser/AsmParser.cpp
@@ -287,6 +287,7 @@ public:
/// }
private:
+ bool isAltmacroString(SMLoc &StrLoc, SMLoc &EndLoc);
bool parseStatement(ParseStatementInfo &Info,
MCAsmParserSemaCallback *SI);
bool parseCurlyBlockScope(SmallVectorImpl<AsmRewrite>& AsmStrRewrites);
@@ -1192,6 +1193,31 @@ AsmParser::applyModifierToExpr(const MCExpr *E,
llvm_unreachable("Invalid expression kind!");
}
+/// This function checks whether the next token is a <string> type or
+/// arithmetic. A string that begins with the character '<' must end with
+/// the character '>'; otherwise it is arithmetic.
+/// If the function returns 'true', the EndLoc argument is set to the
+/// location just past the terminating '>' character.
+
+/// There is a gap between the AltMacro documentation and the single-quote
+/// implementation. GCC does not fully support this feature, so we do not
+/// support it either.
+/// TODO: Add support for single quotes as string delimiters.
+bool AsmParser::isAltmacroString(SMLoc &StrLoc, SMLoc &EndLoc) {
+ assert((StrLoc.getPointer() != NULL) &&
+ "Argument to the function cannot be a NULL value");
+ const char *CharPtr = StrLoc.getPointer();
+ while ((*CharPtr != '>') && (*CharPtr != '\n') &&
+ (*CharPtr != '\r') && (*CharPtr != '\0')){
+ CharPtr++;
+ }
+ if (*CharPtr == '>') {
+ EndLoc = StrLoc.getFromPointer(CharPtr + 1);
+ return true;
+ }
+ return false;
+}
+
/// \brief Parse an expression and return it.
///
/// expr ::= expr &&,|| expr -> lowest.
@@ -2461,9 +2487,9 @@ bool AsmParser::parseMacroArguments(const MCAsmMacro *M,
if (NamedParametersFound && FA.Name.empty())
return Error(IDLoc, "cannot mix positional and keyword arguments");
+ SMLoc StrLoc = Lexer.getLoc();
+ SMLoc EndLoc;
if (Lexer.IsaAltMacroMode() && Lexer.is(AsmToken::Percent)) {
- SMLoc StrLoc = Lexer.getLoc();
- SMLoc EndLoc;
const MCExpr *AbsoluteExp;
int64_t Value;
/// Eat '%'
@@ -2476,8 +2502,16 @@ bool AsmParser::parseMacroArguments(const MCAsmMacro *M,
const char *EndChar = EndLoc.getPointer();
AsmToken newToken(AsmToken::Integer, StringRef(StrChar , EndChar - StrChar), Value);
FA.Value.push_back(newToken);
- }
- else if(parseMacroArgument(FA.Value, Vararg))
+ } else if (Lexer.IsaAltMacroMode() && Lexer.is(AsmToken::Less) &&
+ isAltmacroString(StrLoc, EndLoc)) {
+ const char *StrChar = StrLoc.getPointer();
+ const char *EndChar = EndLoc.getPointer();
+ jumpToLoc(EndLoc, CurBuffer);
+ /// Eat from '<' to '>'
+ Lex();
+ AsmToken newToken(AsmToken::String, StringRef(StrChar, EndChar - StrChar));
+ FA.Value.push_back(newToken);
+ } else if(parseMacroArgument(FA.Value, Vararg))
return true;
unsigned PI = Parameter;
diff --git a/contrib/llvm/lib/Object/COFFObjectFile.cpp b/contrib/llvm/lib/Object/COFFObjectFile.cpp
index 1866aba9b21a..b1223e81be43 100644
--- a/contrib/llvm/lib/Object/COFFObjectFile.cpp
+++ b/contrib/llvm/lib/Object/COFFObjectFile.cpp
@@ -19,6 +19,7 @@
#include "llvm/Object/COFF.h"
#include "llvm/Object/Error.h"
#include "llvm/Object/ObjectFile.h"
+#include "llvm/Support/BinaryStreamReader.h"
#include "llvm/Support/COFF.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/Error.h"
@@ -159,8 +160,7 @@ void COFFObjectFile::moveSymbolNext(DataRefImpl &Ref) const {
Expected<StringRef> COFFObjectFile::getSymbolName(DataRefImpl Ref) const {
COFFSymbolRef Symb = getCOFFSymbol(Ref);
StringRef Result;
- std::error_code EC = getSymbolName(Symb, Result);
- if (EC)
+ if (std::error_code EC = getSymbolName(Symb, Result))
return errorCodeToError(EC);
return Result;
}
@@ -1591,3 +1591,44 @@ std::error_code BaseRelocRef::getRVA(uint32_t &Result) const {
Result = Header->PageRVA + Entry[Index].getOffset();
return std::error_code();
}
+
+#define RETURN_IF_ERROR(X) \
+ if (auto EC = errorToErrorCode(X)) \
+ return EC;
+
+ErrorOr<ArrayRef<UTF16>> ResourceSectionRef::getDirStringAtOffset(uint32_t Offset) {
+ BinaryStreamReader Reader = BinaryStreamReader(BBS);
+ Reader.setOffset(Offset);
+ uint16_t Length;
+ RETURN_IF_ERROR(Reader.readInteger(Length));
+ ArrayRef<UTF16> RawDirString;
+  // Strings are stored as 2-byte aligned UTF-16 characters; readArray reads
+  // Length UTF16 elements (2 * Length bytes), so no manual doubling is needed.
+ RETURN_IF_ERROR(Reader.readArray(RawDirString, Length));
+ return RawDirString;
+}
+
+ErrorOr<ArrayRef<UTF16>>
+ResourceSectionRef::getEntryNameString(const coff_resource_dir_entry &Entry) {
+ return getDirStringAtOffset(Entry.Identifier.getNameOffset());
+}
+
+ErrorOr<const coff_resource_dir_table &>
+ResourceSectionRef::getTableAtOffset(uint32_t Offset) {
+ const coff_resource_dir_table *Table = nullptr;
+
+ BinaryStreamReader Reader(BBS);
+ Reader.setOffset(Offset);
+ RETURN_IF_ERROR(Reader.readObject(Table));
+ assert(Table != nullptr);
+ return *Table;
+}
+
+ErrorOr<const coff_resource_dir_table &>
+ResourceSectionRef::getEntrySubDir(const coff_resource_dir_entry &Entry) {
+ return getTableAtOffset(Entry.Offset.value());
+}
+
+ErrorOr<const coff_resource_dir_table &> ResourceSectionRef::getBaseTable() {
+ return getTableAtOffset(0);
+}
diff --git a/contrib/llvm/lib/Object/WasmObjectFile.cpp b/contrib/llvm/lib/Object/WasmObjectFile.cpp
index 9f3486e58a11..39f8704aacf2 100644
--- a/contrib/llvm/lib/Object/WasmObjectFile.cpp
+++ b/contrib/llvm/lib/Object/WasmObjectFile.cpp
@@ -253,11 +253,12 @@ Error WasmObjectFile::parseNameSection(const uint8_t *Ptr, const uint8_t *End) {
case wasm::WASM_NAMES_FUNCTION: {
uint32_t Count = readVaruint32(Ptr);
while (Count--) {
- /*uint32_t Index =*/readVaruint32(Ptr);
+ uint32_t Index = readVaruint32(Ptr);
StringRef Name = readString(Ptr);
if (!Name.empty())
Symbols.emplace_back(Name,
- WasmSymbol::SymbolType::DEBUG_FUNCTION_NAME);
+ WasmSymbol::SymbolType::DEBUG_FUNCTION_NAME,
+ Sections.size(), Index);
}
break;
}
@@ -384,7 +385,7 @@ Error WasmObjectFile::parseTypeSection(const uint8_t *Ptr, const uint8_t *End) {
Error WasmObjectFile::parseImportSection(const uint8_t *Ptr, const uint8_t *End) {
uint32_t Count = readVaruint32(Ptr);
Imports.reserve(Count);
- while (Count--) {
+ for (uint32_t i = 0; i < Count; i++) {
wasm::WasmImport Im;
Im.Module = readString(Ptr);
Im.Field = readString(Ptr);
@@ -392,12 +393,14 @@ Error WasmObjectFile::parseImportSection(const uint8_t *Ptr, const uint8_t *End)
switch (Im.Kind) {
case wasm::WASM_EXTERNAL_FUNCTION:
Im.SigIndex = readVaruint32(Ptr);
- Symbols.emplace_back(Im.Field, WasmSymbol::SymbolType::FUNCTION_IMPORT);
+ Symbols.emplace_back(Im.Field, WasmSymbol::SymbolType::FUNCTION_IMPORT,
+ Sections.size(), i);
break;
case wasm::WASM_EXTERNAL_GLOBAL:
Im.GlobalType = readVarint7(Ptr);
Im.GlobalMutable = readVaruint1(Ptr);
- Symbols.emplace_back(Im.Field, WasmSymbol::SymbolType::GLOBAL_IMPORT);
+ Symbols.emplace_back(Im.Field, WasmSymbol::SymbolType::GLOBAL_IMPORT,
+ Sections.size(), i);
break;
default:
// TODO(sbc): Handle other kinds of imports
@@ -475,7 +478,7 @@ Error WasmObjectFile::parseGlobalSection(const uint8_t *Ptr, const uint8_t *End)
Error WasmObjectFile::parseExportSection(const uint8_t *Ptr, const uint8_t *End) {
uint32_t Count = readVaruint32(Ptr);
Exports.reserve(Count);
- while (Count--) {
+ for (uint32_t i = 0; i < Count; i++) {
wasm::WasmExport Ex;
Ex.Name = readString(Ptr);
Ex.Kind = readUint8(Ptr);
@@ -483,10 +486,12 @@ Error WasmObjectFile::parseExportSection(const uint8_t *Ptr, const uint8_t *End)
Exports.push_back(Ex);
switch (Ex.Kind) {
case wasm::WASM_EXTERNAL_FUNCTION:
- Symbols.emplace_back(Ex.Name, WasmSymbol::SymbolType::FUNCTION_EXPORT);
+ Symbols.emplace_back(Ex.Name, WasmSymbol::SymbolType::FUNCTION_EXPORT,
+ Sections.size(), i);
break;
case wasm::WASM_EXTERNAL_GLOBAL:
- Symbols.emplace_back(Ex.Name, WasmSymbol::SymbolType::GLOBAL_EXPORT);
+ Symbols.emplace_back(Ex.Name, WasmSymbol::SymbolType::GLOBAL_EXPORT,
+ Sections.size(), i);
break;
default:
// TODO(sbc): Handle other kinds of exports
@@ -597,20 +602,28 @@ const wasm::WasmObjectHeader &WasmObjectFile::getHeader() const {
void WasmObjectFile::moveSymbolNext(DataRefImpl &Symb) const { Symb.d.a++; }
uint32_t WasmObjectFile::getSymbolFlags(DataRefImpl Symb) const {
+ uint32_t Result = SymbolRef::SF_None;
const WasmSymbol &Sym = getWasmSymbol(Symb);
+
switch (Sym.Type) {
case WasmSymbol::SymbolType::FUNCTION_IMPORT:
- return object::SymbolRef::SF_Undefined | SymbolRef::SF_Executable;
+ Result |= SymbolRef::SF_Undefined | SymbolRef::SF_Executable;
+ break;
case WasmSymbol::SymbolType::FUNCTION_EXPORT:
- return object::SymbolRef::SF_Global | SymbolRef::SF_Executable;
+ Result |= SymbolRef::SF_Global | SymbolRef::SF_Executable;
+ break;
case WasmSymbol::SymbolType::DEBUG_FUNCTION_NAME:
- return object::SymbolRef::SF_Executable;
+ Result |= SymbolRef::SF_Executable;
+ break;
case WasmSymbol::SymbolType::GLOBAL_IMPORT:
- return object::SymbolRef::SF_Undefined;
+ Result |= SymbolRef::SF_Undefined;
+ break;
case WasmSymbol::SymbolType::GLOBAL_EXPORT:
- return object::SymbolRef::SF_Global;
+ Result |= SymbolRef::SF_Global;
+ break;
}
- llvm_unreachable("Unknown WasmSymbol::SymbolType");
+
+ return Result;
}
basic_symbol_iterator WasmObjectFile::symbol_begin() const {
@@ -635,12 +648,12 @@ Expected<StringRef> WasmObjectFile::getSymbolName(DataRefImpl Symb) const {
}
Expected<uint64_t> WasmObjectFile::getSymbolAddress(DataRefImpl Symb) const {
- return (uint64_t)Symb.d.a;
+ return getSymbolValue(Symb);
}
uint64_t WasmObjectFile::getSymbolValueImpl(DataRefImpl Symb) const {
- llvm_unreachable("not yet implemented");
- return 0;
+ const WasmSymbol &Sym = getWasmSymbol(Symb);
+ return Sym.ElementIndex;
}
uint32_t WasmObjectFile::getSymbolAlignment(DataRefImpl Symb) const {
@@ -655,14 +668,27 @@ uint64_t WasmObjectFile::getCommonSymbolSizeImpl(DataRefImpl Symb) const {
Expected<SymbolRef::Type>
WasmObjectFile::getSymbolType(DataRefImpl Symb) const {
- llvm_unreachable("not yet implemented");
- return errorCodeToError(object_error::invalid_symbol_index);
+ const WasmSymbol &Sym = getWasmSymbol(Symb);
+
+ switch (Sym.Type) {
+ case WasmSymbol::SymbolType::FUNCTION_IMPORT:
+ case WasmSymbol::SymbolType::FUNCTION_EXPORT:
+ case WasmSymbol::SymbolType::DEBUG_FUNCTION_NAME:
+ return SymbolRef::ST_Function;
+ case WasmSymbol::SymbolType::GLOBAL_IMPORT:
+ case WasmSymbol::SymbolType::GLOBAL_EXPORT:
+ return SymbolRef::ST_Data;
+ }
+
+ llvm_unreachable("Unknown WasmSymbol::SymbolType");
+ return SymbolRef::ST_Other;
}
Expected<section_iterator>
WasmObjectFile::getSymbolSection(DataRefImpl Symb) const {
- llvm_unreachable("not yet implemented");
- return errorCodeToError(object_error::invalid_symbol_index);
+ DataRefImpl Ref;
+ Ref.d.a = getWasmSymbol(Symb).Section;
+ return section_iterator(SectionRef(Ref, this));
}
void WasmObjectFile::moveSectionNext(DataRefImpl &Sec) const { Sec.d.a++; }
diff --git a/contrib/llvm/lib/ObjectYAML/WasmYAML.cpp b/contrib/llvm/lib/ObjectYAML/WasmYAML.cpp
index 9b1ff7e5dc16..c5d1b438ee2a 100644
--- a/contrib/llvm/lib/ObjectYAML/WasmYAML.cpp
+++ b/contrib/llvm/lib/ObjectYAML/WasmYAML.cpp
@@ -50,7 +50,11 @@ static void commonSectionMapping(IO &IO, WasmYAML::Section &Section) {
static void sectionMapping(IO &IO, WasmYAML::CustomSection &Section) {
commonSectionMapping(IO, Section);
IO.mapRequired("Name", Section.Name);
- IO.mapRequired("Payload", Section.Payload);
+ if (Section.Name == "name") {
+ IO.mapOptional("FunctionNames", Section.FunctionNames);
+ } else {
+ IO.mapRequired("Payload", Section.Payload);
+ }
}
static void sectionMapping(IO &IO, WasmYAML::TypeSection &Section) {
@@ -226,6 +230,12 @@ void MappingTraits<WasmYAML::Relocation>::mapping(
IO.mapOptional("Addend", Relocation.Addend, 0);
}
+void MappingTraits<WasmYAML::NameEntry>::mapping(
+ IO &IO, WasmYAML::NameEntry &NameEntry) {
+ IO.mapRequired("Index", NameEntry.Index);
+ IO.mapRequired("Name", NameEntry.Name);
+}
+
void MappingTraits<WasmYAML::LocalDecl>::mapping(
IO &IO, WasmYAML::LocalDecl &LocalDecl) {
IO.mapRequired("Type", LocalDecl.Type);
diff --git a/contrib/llvm/lib/Passes/PassBuilder.cpp b/contrib/llvm/lib/Passes/PassBuilder.cpp
index 8db65f7f0e82..7076e751071d 100644
--- a/contrib/llvm/lib/Passes/PassBuilder.cpp
+++ b/contrib/llvm/lib/Passes/PassBuilder.cpp
@@ -505,6 +505,10 @@ PassBuilder::buildPerModuleDefaultPipeline(OptimizationLevel Level,
// the CGSCC pipeline.
MPM.addPass(RequireAnalysisPass<GlobalsAA, Module>());
+ // Require the ProfileSummaryAnalysis for the module so we can query it within
+ // the inliner pass.
+ MPM.addPass(RequireAnalysisPass<ProfileSummaryAnalysis, Module>());
+
// Now begin the main postorder CGSCC pipeline.
// FIXME: The current CGSCC pipeline has its origins in the legacy pass
// manager and trying to emulate its precise behavior. Much of this doesn't
diff --git a/contrib/llvm/lib/Support/APInt.cpp b/contrib/llvm/lib/Support/APInt.cpp
index fa81b28cd083..caa0691f9205 100644
--- a/contrib/llvm/lib/Support/APInt.cpp
+++ b/contrib/llvm/lib/Support/APInt.cpp
@@ -225,114 +225,17 @@ APInt& APInt::operator-=(uint64_t RHS) {
return clearUnusedBits();
}
-/// Multiplies an integer array, x, by a uint64_t integer and places the result
-/// into dest.
-/// @returns the carry out of the multiplication.
-/// @brief Multiply a multi-digit APInt by a single digit (64-bit) integer.
-static uint64_t mul_1(uint64_t dest[], uint64_t x[], unsigned len, uint64_t y) {
- // Split y into high 32-bit part (hy) and low 32-bit part (ly)
- uint64_t ly = y & 0xffffffffULL, hy = y >> 32;
- uint64_t carry = 0;
-
- // For each digit of x.
- for (unsigned i = 0; i < len; ++i) {
- // Split x into high and low words
- uint64_t lx = x[i] & 0xffffffffULL;
- uint64_t hx = x[i] >> 32;
- // hasCarry - A flag to indicate if there is a carry to the next digit.
- // hasCarry == 0, no carry
- // hasCarry == 1, has carry
- // hasCarry == 2, no carry and the calculation result == 0.
- uint8_t hasCarry = 0;
- dest[i] = carry + lx * ly;
- // Determine if the add above introduces carry.
- hasCarry = (dest[i] < carry) ? 1 : 0;
- carry = hx * ly + (dest[i] >> 32) + (hasCarry ? (1ULL << 32) : 0);
- // The upper limit of carry can be (2^32 - 1)(2^32 - 1) +
- // (2^32 - 1) + 2^32 = 2^64.
- hasCarry = (!carry && hasCarry) ? 1 : (!carry ? 2 : 0);
-
- carry += (lx * hy) & 0xffffffffULL;
- dest[i] = (carry << 32) | (dest[i] & 0xffffffffULL);
- carry = (((!carry && hasCarry != 2) || hasCarry == 1) ? (1ULL << 32) : 0) +
- (carry >> 32) + ((lx * hy) >> 32) + hx * hy;
- }
- return carry;
-}
-
-/// Multiplies integer array x by integer array y and stores the result into
-/// the integer array dest. Note that dest's size must be >= xlen + ylen.
-/// @brief Generalized multiplication of integer arrays.
-static void mul(uint64_t dest[], uint64_t x[], unsigned xlen, uint64_t y[],
- unsigned ylen) {
- dest[xlen] = mul_1(dest, x, xlen, y[0]);
- for (unsigned i = 1; i < ylen; ++i) {
- uint64_t ly = y[i] & 0xffffffffULL, hy = y[i] >> 32;
- uint64_t carry = 0, lx = 0, hx = 0;
- for (unsigned j = 0; j < xlen; ++j) {
- lx = x[j] & 0xffffffffULL;
- hx = x[j] >> 32;
- // hasCarry - A flag to indicate if has carry.
- // hasCarry == 0, no carry
- // hasCarry == 1, has carry
- // hasCarry == 2, no carry and the calculation result == 0.
- uint8_t hasCarry = 0;
- uint64_t resul = carry + lx * ly;
- hasCarry = (resul < carry) ? 1 : 0;
- carry = (hasCarry ? (1ULL << 32) : 0) + hx * ly + (resul >> 32);
- hasCarry = (!carry && hasCarry) ? 1 : (!carry ? 2 : 0);
-
- carry += (lx * hy) & 0xffffffffULL;
- resul = (carry << 32) | (resul & 0xffffffffULL);
- dest[i+j] += resul;
- carry = (((!carry && hasCarry != 2) || hasCarry == 1) ? (1ULL << 32) : 0)+
- (carry >> 32) + (dest[i+j] < resul ? 1 : 0) +
- ((lx * hy) >> 32) + hx * hy;
- }
- dest[i+xlen] = carry;
- }
-}
-
-APInt& APInt::operator*=(const APInt& RHS) {
+APInt APInt::operator*(const APInt& RHS) const {
assert(BitWidth == RHS.BitWidth && "Bit widths must be the same");
- if (isSingleWord()) {
- U.VAL *= RHS.U.VAL;
- clearUnusedBits();
- return *this;
- }
-
- // Get some bit facts about LHS and check for zero
- unsigned lhsBits = getActiveBits();
- unsigned lhsWords = !lhsBits ? 0 : whichWord(lhsBits - 1) + 1;
- if (!lhsWords)
- // 0 * X ===> 0
- return *this;
-
- // Get some bit facts about RHS and check for zero
- unsigned rhsBits = RHS.getActiveBits();
- unsigned rhsWords = !rhsBits ? 0 : whichWord(rhsBits - 1) + 1;
- if (!rhsWords) {
- // X * 0 ===> 0
- clearAllBits();
- return *this;
- }
-
- // Allocate space for the result
- unsigned destWords = rhsWords + lhsWords;
- uint64_t *dest = getMemory(destWords);
+ if (isSingleWord())
+ return APInt(BitWidth, U.VAL * RHS.U.VAL);
- // Perform the long multiply
- mul(dest, U.pVal, lhsWords, RHS.U.pVal, rhsWords);
+ APInt Result(getMemory(getNumWords()), getBitWidth());
- // Copy result back into *this
- clearAllBits();
- unsigned wordsToCopy = destWords >= getNumWords() ? getNumWords() : destWords;
- memcpy(U.pVal, dest, wordsToCopy * APINT_WORD_SIZE);
- clearUnusedBits();
+ tcMultiply(Result.U.pVal, U.pVal, RHS.U.pVal, getNumWords());
- // delete dest array and return
- delete[] dest;
- return *this;
+ Result.clearUnusedBits();
+ return Result;
}
void APInt::AndAssignSlowCase(const APInt& RHS) {
@@ -347,13 +250,20 @@ void APInt::XorAssignSlowCase(const APInt& RHS) {
tcXor(U.pVal, RHS.U.pVal, getNumWords());
}
-APInt APInt::operator*(const APInt& RHS) const {
+APInt& APInt::operator*=(const APInt& RHS) {
assert(BitWidth == RHS.BitWidth && "Bit widths must be the same");
- if (isSingleWord())
- return APInt(BitWidth, U.VAL * RHS.U.VAL);
- APInt Result(*this);
- Result *= RHS;
- return Result;
+ *this = *this * RHS;
+ return *this;
+}
+
+APInt& APInt::operator*=(uint64_t RHS) {
+ if (isSingleWord()) {
+ U.VAL *= RHS;
+ } else {
+ unsigned NumWords = getNumWords();
+ tcMultiplyPart(U.pVal, U.pVal, RHS, 0, NumWords, NumWords, false);
+ }
+ return clearUnusedBits();
}
bool APInt::EqualSlowCase(const APInt& RHS) const {
@@ -1932,10 +1842,6 @@ void APInt::fromString(unsigned numbits, StringRef str, uint8_t radix) {
// Figure out if we can shift instead of multiply
unsigned shift = (radix == 16 ? 4 : radix == 8 ? 3 : radix == 2 ? 1 : 0);
- // Set up an APInt for the radix multiplier outside the loop so we don't
- // constantly construct/destruct it.
- APInt apradix(getBitWidth(), radix);
-
// Enter digit traversal loop
for (StringRef::iterator e = str.end(); p != e; ++p) {
unsigned digit = getDigit(*p, radix);
@@ -1946,7 +1852,7 @@ void APInt::fromString(unsigned numbits, StringRef str, uint8_t radix) {
if (shift)
*this <<= shift;
else
- *this *= apradix;
+ *this *= radix;
}
// Add in the digit we just interpreted
@@ -2346,10 +2252,9 @@ int APInt::tcMultiplyPart(WordType *dst, const WordType *src,
assert(dstParts <= srcParts + 1);
/* N loops; minimum of dstParts and srcParts. */
- unsigned n = dstParts < srcParts ? dstParts: srcParts;
+ unsigned n = std::min(dstParts, srcParts);
- unsigned i;
- for (i = 0; i < n; i++) {
+ for (unsigned i = 0; i < n; i++) {
WordType low, mid, high, srcPart;
/* [ LOW, HIGH ] = MULTIPLIER * SRC[i] + DST[i] + CARRY.
@@ -2400,27 +2305,27 @@ int APInt::tcMultiplyPart(WordType *dst, const WordType *src,
carry = high;
}
- if (i < dstParts) {
+ if (srcParts < dstParts) {
/* Full multiplication, there is no overflow. */
- assert(i + 1 == dstParts);
- dst[i] = carry;
- return 0;
- } else {
- /* We overflowed if there is carry. */
- if (carry)
- return 1;
-
- /* We would overflow if any significant unwritten parts would be
- non-zero. This is true if any remaining src parts are non-zero
- and the multiplier is non-zero. */
- if (multiplier)
- for (; i < srcParts; i++)
- if (src[i])
- return 1;
-
- /* We fitted in the narrow destination. */
+ assert(srcParts + 1 == dstParts);
+ dst[srcParts] = carry;
return 0;
}
+
+ /* We overflowed if there is carry. */
+ if (carry)
+ return 1;
+
+ /* We would overflow if any significant unwritten parts would be
+ non-zero. This is true if any remaining src parts are non-zero
+ and the multiplier is non-zero. */
+ if (multiplier)
+ for (unsigned i = dstParts; i < srcParts; i++)
+ if (src[i])
+ return 1;
+
+ /* We fitted in the narrow destination. */
+ return 0;
}
/* DST = LHS * RHS, where DST has the same width as the operands and
@@ -2449,20 +2354,19 @@ unsigned APInt::tcFullMultiply(WordType *dst, const WordType *lhs,
const WordType *rhs, unsigned lhsParts,
unsigned rhsParts) {
/* Put the narrower number on the LHS for less loops below. */
- if (lhsParts > rhsParts) {
+ if (lhsParts > rhsParts)
return tcFullMultiply (dst, rhs, lhs, rhsParts, lhsParts);
- } else {
- assert(dst != lhs && dst != rhs);
- tcSet(dst, 0, rhsParts);
+ assert(dst != lhs && dst != rhs);
- for (unsigned i = 0; i < lhsParts; i++)
- tcMultiplyPart(&dst[i], rhs, lhs[i], 0, rhsParts, rhsParts + 1, true);
+ tcSet(dst, 0, rhsParts);
- unsigned n = lhsParts + rhsParts;
+ for (unsigned i = 0; i < lhsParts; i++)
+ tcMultiplyPart(&dst[i], rhs, lhs[i], 0, rhsParts, rhsParts + 1, true);
- return n - (dst[n - 1] == 0);
- }
+ unsigned n = lhsParts + rhsParts;
+
+ return n - (dst[n - 1] == 0);
}
/* If RHS is zero LHS and REMAINDER are left unchanged, return one.
diff --git a/contrib/llvm/lib/Support/TargetParser.cpp b/contrib/llvm/lib/Support/TargetParser.cpp
index bba7c6d0d604..b16351906a4c 100644
--- a/contrib/llvm/lib/Support/TargetParser.cpp
+++ b/contrib/llvm/lib/Support/TargetParser.cpp
@@ -422,8 +422,10 @@ unsigned llvm::AArch64::getDefaultExtensions(StringRef CPU, unsigned ArchKind) {
return AArch64ARCHNames[ArchKind].ArchBaseExtensions;
return StringSwitch<unsigned>(CPU)
-#define AARCH64_CPU_NAME(NAME, ID, DEFAULT_FPU, IS_DEFAULT, DEFAULT_EXT) \
- .Case(NAME, DEFAULT_EXT)
+#define AARCH64_CPU_NAME(NAME, ID, DEFAULT_FPU, IS_DEFAULT, DEFAULT_EXT) \
+ .Case(NAME, \
+ AArch64ARCHNames[(unsigned)AArch64::ArchKind::ID].ArchBaseExtensions | \
+ DEFAULT_EXT)
#include "llvm/Support/AArch64TargetParser.def"
.Default(AArch64::AEK_INVALID);
}
diff --git a/contrib/llvm/lib/Support/Unix/DynamicLibrary.inc b/contrib/llvm/lib/Support/Unix/DynamicLibrary.inc
index a0110e7044ee..a0526fa2c1b8 100644
--- a/contrib/llvm/lib/Support/Unix/DynamicLibrary.inc
+++ b/contrib/llvm/lib/Support/Unix/DynamicLibrary.inc
@@ -31,7 +31,7 @@ void *DynamicLibrary::HandleSet::DLOpen(const char *File, std::string *Err) {
#ifdef __CYGWIN__
// Cygwin searches symbols only in the main
// with the handle of dlopen(NULL, RTLD_GLOBAL).
- if (!Filename)
+ if (!File)
Handle = RTLD_DEFAULT;
#endif
diff --git a/contrib/llvm/lib/Support/Unix/Path.inc b/contrib/llvm/lib/Support/Unix/Path.inc
index 93f8982196b3..fa28ba1b6ab6 100644
--- a/contrib/llvm/lib/Support/Unix/Path.inc
+++ b/contrib/llvm/lib/Support/Unix/Path.inc
@@ -421,14 +421,15 @@ std::error_code resize_file(int FD, uint64_t Size) {
#if defined(HAVE_POSIX_FALLOCATE)
// If we have posix_fallocate use it. Unlike ftruncate it always allocates
// space, so we get an error if the disk is full.
- if (int Err = ::posix_fallocate(FD, 0, Size))
- return std::error_code(Err, std::generic_category());
-#else
+ if (int Err = ::posix_fallocate(FD, 0, Size)) {
+ if (Err != EOPNOTSUPP)
+ return std::error_code(Err, std::generic_category());
+ }
+#endif
// Use ftruncate as a fallback. It may or may not allocate space. At least on
// OS X with HFS+ it does.
if (::ftruncate(FD, Size) == -1)
return std::error_code(errno, std::generic_category());
-#endif
return std::error_code();
}
diff --git a/contrib/llvm/lib/Target/AArch64/AArch64.h b/contrib/llvm/lib/Target/AArch64/AArch64.h
index b44b13e36e15..3e0e3978b90b 100644
--- a/contrib/llvm/lib/Target/AArch64/AArch64.h
+++ b/contrib/llvm/lib/Target/AArch64/AArch64.h
@@ -41,7 +41,6 @@ FunctionPass *createAArch64LoadStoreOptimizationPass();
FunctionPass *createAArch64VectorByElementOptPass();
ModulePass *createAArch64PromoteConstantPass();
FunctionPass *createAArch64ConditionOptimizerPass();
-FunctionPass *createAArch64AddressTypePromotionPass();
FunctionPass *createAArch64A57FPLoadBalancing();
FunctionPass *createAArch64A53Fix835769();
@@ -54,7 +53,6 @@ createAArch64InstructionSelector(const AArch64TargetMachine &,
void initializeAArch64A53Fix835769Pass(PassRegistry&);
void initializeAArch64A57FPLoadBalancingPass(PassRegistry&);
-void initializeAArch64AddressTypePromotionPass(PassRegistry&);
void initializeAArch64AdvSIMDScalarPass(PassRegistry&);
void initializeAArch64CollectLOHPass(PassRegistry&);
void initializeAArch64ConditionalComparesPass(PassRegistry&);
diff --git a/contrib/llvm/lib/Target/AArch64/AArch64.td b/contrib/llvm/lib/Target/AArch64/AArch64.td
index 519ca2894683..73f2b6a25f66 100644
--- a/contrib/llvm/lib/Target/AArch64/AArch64.td
+++ b/contrib/llvm/lib/Target/AArch64/AArch64.td
@@ -358,7 +358,6 @@ def ProcThunderXT83 : SubtargetFeature<"thunderxt83", "ARMProcFamily",
FeatureNEON]>;
def : ProcessorModel<"generic", NoSchedModel, [
- FeatureCRC,
FeatureFPARMv8,
FeatureNEON,
FeaturePerfMon,
diff --git a/contrib/llvm/lib/Target/AArch64/AArch64AddressTypePromotion.cpp b/contrib/llvm/lib/Target/AArch64/AArch64AddressTypePromotion.cpp
deleted file mode 100644
index e1b8ee6d03c3..000000000000
--- a/contrib/llvm/lib/Target/AArch64/AArch64AddressTypePromotion.cpp
+++ /dev/null
@@ -1,493 +0,0 @@
-//===-- AArch64AddressTypePromotion.cpp --- Promote type for addr accesses -==//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This pass tries to promote the computations use to obtained a sign extended
-// value used into memory accesses.
-// E.g.
-// a = add nsw i32 b, 3
-// d = sext i32 a to i64
-// e = getelementptr ..., i64 d
-//
-// =>
-// f = sext i32 b to i64
-// a = add nsw i64 f, 3
-// e = getelementptr ..., i64 a
-//
-// This is legal to do if the computations are marked with either nsw or nuw
-// markers. Moreover, the current heuristic is simple: it does not create new
-// sext operations, i.e., it gives up when a sext would have forked (e.g., if a
-// = add i32 b, c, two sexts are required to promote the computation).
-//
-// FIXME: This pass may be useful for other targets too.
-// ===---------------------------------------------------------------------===//
-
-#include "AArch64.h"
-#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/SmallPtrSet.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/StringRef.h"
-#include "llvm/IR/Constants.h"
-#include "llvm/IR/Dominators.h"
-#include "llvm/IR/Function.h"
-#include "llvm/IR/InstrTypes.h"
-#include "llvm/IR/Instruction.h"
-#include "llvm/IR/Instructions.h"
-#include "llvm/IR/Operator.h"
-#include "llvm/IR/Type.h"
-#include "llvm/IR/Use.h"
-#include "llvm/IR/User.h"
-#include "llvm/Pass.h"
-#include "llvm/Support/Casting.h"
-#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/raw_ostream.h"
-#include <cassert>
-
-using namespace llvm;
-
-#define DEBUG_TYPE "aarch64-type-promotion"
-
-static cl::opt<bool>
-EnableMerge("aarch64-type-promotion-merge", cl::Hidden,
- cl::desc("Enable merging of redundant sexts when one is dominating"
- " the other."),
- cl::init(true));
-
-#define AARCH64_TYPE_PROMO_NAME "AArch64 Address Type Promotion"
-
-//===----------------------------------------------------------------------===//
-// AArch64AddressTypePromotion
-//===----------------------------------------------------------------------===//
-
-namespace {
-
-class AArch64AddressTypePromotion : public FunctionPass {
-public:
- static char ID;
-
- AArch64AddressTypePromotion() : FunctionPass(ID) {
- initializeAArch64AddressTypePromotionPass(*PassRegistry::getPassRegistry());
- }
-
- StringRef getPassName() const override { return AARCH64_TYPE_PROMO_NAME; }
-
- /// Iterate over the functions and promote the computation of interesting
- // sext instructions.
- bool runOnFunction(Function &F) override;
-
-private:
- /// The current function.
- Function *Func = nullptr;
-
- /// Filter out all sexts that does not have this type.
- /// Currently initialized with Int64Ty.
- Type *ConsideredSExtType = nullptr;
-
- // This transformation requires dominator info.
- void getAnalysisUsage(AnalysisUsage &AU) const override {
- AU.setPreservesCFG();
- AU.addRequired<DominatorTreeWrapperPass>();
- AU.addPreserved<DominatorTreeWrapperPass>();
- FunctionPass::getAnalysisUsage(AU);
- }
-
- typedef SmallPtrSet<Instruction *, 32> SetOfInstructions;
- typedef SmallVector<Instruction *, 16> Instructions;
- typedef DenseMap<Value *, Instructions> ValueToInsts;
-
- /// Check if it is profitable to move a sext through this instruction.
- /// Currently, we consider it is profitable if:
- /// - Inst is used only once (no need to insert truncate).
- /// - Inst has only one operand that will require a sext operation (we do
- /// do not create new sext operation).
- bool shouldGetThrough(const Instruction *Inst);
-
- /// Check if it is possible and legal to move a sext through this
- /// instruction.
- /// Current heuristic considers that we can get through:
- /// - Arithmetic operation marked with the nsw or nuw flag.
- /// - Other sext operation.
- /// - Truncate operation if it was just dropping sign extended bits.
- bool canGetThrough(const Instruction *Inst);
-
- /// Move sext operations through safe to sext instructions.
- bool propagateSignExtension(Instructions &SExtInsts);
-
- /// Is this sext should be considered for code motion.
- /// We look for sext with ConsideredSExtType and uses in at least one
- // GetElementPtrInst.
- bool shouldConsiderSExt(const Instruction *SExt) const;
-
- /// Collect all interesting sext operations, i.e., the ones with the right
- /// type and used in memory accesses.
- /// More precisely, a sext instruction is considered as interesting if it
- /// is used in a "complex" getelementptr or it exits at least another
- /// sext instruction that sign extended the same initial value.
- /// A getelementptr is considered as "complex" if it has more than 2
- // operands.
- void analyzeSExtension(Instructions &SExtInsts);
-
- /// Merge redundant sign extension operations in common dominator.
- void mergeSExts(ValueToInsts &ValToSExtendedUses,
- SetOfInstructions &ToRemove);
-};
-
-} // end anonymous namespace
-
-char AArch64AddressTypePromotion::ID = 0;
-
-INITIALIZE_PASS_BEGIN(AArch64AddressTypePromotion, "aarch64-type-promotion",
- AARCH64_TYPE_PROMO_NAME, false, false)
-INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
-INITIALIZE_PASS_END(AArch64AddressTypePromotion, "aarch64-type-promotion",
- AARCH64_TYPE_PROMO_NAME, false, false)
-
-FunctionPass *llvm::createAArch64AddressTypePromotionPass() {
- return new AArch64AddressTypePromotion();
-}
-
-bool AArch64AddressTypePromotion::canGetThrough(const Instruction *Inst) {
- if (isa<SExtInst>(Inst))
- return true;
-
- const BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Inst);
- if (BinOp && isa<OverflowingBinaryOperator>(BinOp) &&
- (BinOp->hasNoUnsignedWrap() || BinOp->hasNoSignedWrap()))
- return true;
-
- // sext(trunc(sext)) --> sext
- if (isa<TruncInst>(Inst) && isa<SExtInst>(Inst->getOperand(0))) {
- const Instruction *Opnd = cast<Instruction>(Inst->getOperand(0));
- // Check that the truncate just drop sign extended bits.
- if (Inst->getType()->getIntegerBitWidth() >=
- Opnd->getOperand(0)->getType()->getIntegerBitWidth() &&
- Inst->getOperand(0)->getType()->getIntegerBitWidth() <=
- ConsideredSExtType->getIntegerBitWidth())
- return true;
- }
-
- return false;
-}
-
-bool AArch64AddressTypePromotion::shouldGetThrough(const Instruction *Inst) {
- // If the type of the sext is the same as the considered one, this sext
- // will become useless.
- // Otherwise, we will have to do something to preserve the original value,
- // unless it is used once.
- if (isa<SExtInst>(Inst) &&
- (Inst->getType() == ConsideredSExtType || Inst->hasOneUse()))
- return true;
-
- // If the Inst is used more that once, we may need to insert truncate
- // operations and we don't do that at the moment.
- if (!Inst->hasOneUse())
- return false;
-
- // This truncate is used only once, thus if we can get thourgh, it will become
- // useless.
- if (isa<TruncInst>(Inst))
- return true;
-
- // If both operands are not constant, a new sext will be created here.
- // Current heuristic is: each step should be profitable.
- // Therefore we don't allow to increase the number of sext even if it may
- // be profitable later on.
- if (isa<BinaryOperator>(Inst) && isa<ConstantInt>(Inst->getOperand(1)))
- return true;
-
- return false;
-}
-
-static bool shouldSExtOperand(const Instruction *Inst, int OpIdx) {
- return !(isa<SelectInst>(Inst) && OpIdx == 0);
-}
-
-bool
-AArch64AddressTypePromotion::shouldConsiderSExt(const Instruction *SExt) const {
- if (SExt->getType() != ConsideredSExtType)
- return false;
-
- for (const User *U : SExt->users()) {
- if (isa<GetElementPtrInst>(U))
- return true;
- }
-
- return false;
-}
-
-// Input:
-// - SExtInsts contains all the sext instructions that are used directly in
-// GetElementPtrInst, i.e., access to memory.
-// Algorithm:
-// - For each sext operation in SExtInsts:
-// Let var be the operand of sext.
-// while it is profitable (see shouldGetThrough), legal, and safe
-// (see canGetThrough) to move sext through var's definition:
-// * promote the type of var's definition.
-// * fold var into sext uses.
-// * move sext above var's definition.
-// * update sext operand to use the operand of var that should be sign
-// extended (by construction there is only one).
-//
-// E.g.,
-// a = ... i32 c, 3
-// b = sext i32 a to i64 <- is it legal/safe/profitable to get through 'a'
-// ...
-// = b
-// => Yes, update the code
-// b = sext i32 c to i64
-// a = ... i64 b, 3
-// ...
-// = a
-// Iterate on 'c'.
-bool
-AArch64AddressTypePromotion::propagateSignExtension(Instructions &SExtInsts) {
- DEBUG(dbgs() << "*** Propagate Sign Extension ***\n");
-
- bool LocalChange = false;
- SetOfInstructions ToRemove;
- ValueToInsts ValToSExtendedUses;
- while (!SExtInsts.empty()) {
- // Get through simple chain.
- Instruction *SExt = SExtInsts.pop_back_val();
-
- DEBUG(dbgs() << "Consider:\n" << *SExt << '\n');
-
- // If this SExt has already been merged continue.
- if (SExt->use_empty() && ToRemove.count(SExt)) {
- DEBUG(dbgs() << "No uses => marked as delete\n");
- continue;
- }
-
- // Now try to get through the chain of definitions.
- while (auto *Inst = dyn_cast<Instruction>(SExt->getOperand(0))) {
- DEBUG(dbgs() << "Try to get through:\n" << *Inst << '\n');
- if (!canGetThrough(Inst) || !shouldGetThrough(Inst)) {
- // We cannot get through something that is not an Instruction
- // or not safe to SExt.
- DEBUG(dbgs() << "Cannot get through\n");
- break;
- }
-
- LocalChange = true;
- // If this is a sign extend, it becomes useless.
- if (isa<SExtInst>(Inst) || isa<TruncInst>(Inst)) {
- DEBUG(dbgs() << "SExt or trunc, mark it as to remove\n");
- // We cannot use replaceAllUsesWith here because we may trigger some
- // assertion on the type as all involved sext operation may have not
- // been moved yet.
- while (!Inst->use_empty()) {
- Use &U = *Inst->use_begin();
- Instruction *User = dyn_cast<Instruction>(U.getUser());
- assert(User && "User of sext is not an Instruction!");
- User->setOperand(U.getOperandNo(), SExt);
- }
- ToRemove.insert(Inst);
- SExt->setOperand(0, Inst->getOperand(0));
- SExt->moveBefore(Inst);
- continue;
- }
-
- // Get through the Instruction:
- // 1. Update its type.
- // 2. Replace the uses of SExt by Inst.
- // 3. Sign extend each operand that needs to be sign extended.
-
- // Step #1.
- Inst->mutateType(SExt->getType());
- // Step #2.
- SExt->replaceAllUsesWith(Inst);
- // Step #3.
- Instruction *SExtForOpnd = SExt;
-
- DEBUG(dbgs() << "Propagate SExt to operands\n");
- for (int OpIdx = 0, EndOpIdx = Inst->getNumOperands(); OpIdx != EndOpIdx;
- ++OpIdx) {
- DEBUG(dbgs() << "Operand:\n" << *(Inst->getOperand(OpIdx)) << '\n');
- if (Inst->getOperand(OpIdx)->getType() == SExt->getType() ||
- !shouldSExtOperand(Inst, OpIdx)) {
- DEBUG(dbgs() << "No need to propagate\n");
- continue;
- }
- // Check if we can statically sign extend the operand.
- Value *Opnd = Inst->getOperand(OpIdx);
- if (const ConstantInt *Cst = dyn_cast<ConstantInt>(Opnd)) {
- DEBUG(dbgs() << "Statically sign extend\n");
- Inst->setOperand(OpIdx, ConstantInt::getSigned(SExt->getType(),
- Cst->getSExtValue()));
- continue;
- }
- // UndefValue are typed, so we have to statically sign extend them.
- if (isa<UndefValue>(Opnd)) {
- DEBUG(dbgs() << "Statically sign extend\n");
- Inst->setOperand(OpIdx, UndefValue::get(SExt->getType()));
- continue;
- }
-
- // Otherwise we have to explicity sign extend it.
- assert(SExtForOpnd &&
- "Only one operand should have been sign extended");
-
- SExtForOpnd->setOperand(0, Opnd);
-
- DEBUG(dbgs() << "Move before:\n" << *Inst << "\nSign extend\n");
- // Move the sign extension before the insertion point.
- SExtForOpnd->moveBefore(Inst);
- Inst->setOperand(OpIdx, SExtForOpnd);
- // If more sext are required, new instructions will have to be created.
- SExtForOpnd = nullptr;
- }
- if (SExtForOpnd == SExt) {
- DEBUG(dbgs() << "Sign extension is useless now\n");
- ToRemove.insert(SExt);
- break;
- }
- }
-
- // If the use is already of the right type, connect its uses to its argument
- // and delete it.
- // This can happen for an Instruction all uses of which are sign extended.
- if (!ToRemove.count(SExt) &&
- SExt->getType() == SExt->getOperand(0)->getType()) {
- DEBUG(dbgs() << "Sign extension is useless, attach its use to "
- "its argument\n");
- SExt->replaceAllUsesWith(SExt->getOperand(0));
- ToRemove.insert(SExt);
- } else
- ValToSExtendedUses[SExt->getOperand(0)].push_back(SExt);
- }
-
- if (EnableMerge)
- mergeSExts(ValToSExtendedUses, ToRemove);
-
- // Remove all instructions marked as ToRemove.
- for (Instruction *I: ToRemove)
- I->eraseFromParent();
- return LocalChange;
-}
-
-void AArch64AddressTypePromotion::mergeSExts(ValueToInsts &ValToSExtendedUses,
- SetOfInstructions &ToRemove) {
- DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
-
- for (auto &Entry : ValToSExtendedUses) {
- Instructions &Insts = Entry.second;
- Instructions CurPts;
- for (Instruction *Inst : Insts) {
- if (ToRemove.count(Inst))
- continue;
- bool inserted = false;
- for (auto &Pt : CurPts) {
- if (DT.dominates(Inst, Pt)) {
- DEBUG(dbgs() << "Replace all uses of:\n" << *Pt << "\nwith:\n"
- << *Inst << '\n');
- Pt->replaceAllUsesWith(Inst);
- ToRemove.insert(Pt);
- Pt = Inst;
- inserted = true;
- break;
- }
- if (!DT.dominates(Pt, Inst))
- // Give up if we need to merge in a common dominator as the
- // expermients show it is not profitable.
- continue;
-
- DEBUG(dbgs() << "Replace all uses of:\n" << *Inst << "\nwith:\n"
- << *Pt << '\n');
- Inst->replaceAllUsesWith(Pt);
- ToRemove.insert(Inst);
- inserted = true;
- break;
- }
- if (!inserted)
- CurPts.push_back(Inst);
- }
- }
-}
-
-void AArch64AddressTypePromotion::analyzeSExtension(Instructions &SExtInsts) {
- DEBUG(dbgs() << "*** Analyze Sign Extensions ***\n");
-
- DenseMap<Value *, Instruction *> SeenChains;
-
- for (auto &BB : *Func) {
- for (auto &II : BB) {
- Instruction *SExt = &II;
-
- // Collect all sext operation per type.
- if (!isa<SExtInst>(SExt) || !shouldConsiderSExt(SExt))
- continue;
-
- DEBUG(dbgs() << "Found:\n" << (*SExt) << '\n');
-
- // Cases where we actually perform the optimization:
- // 1. SExt is used in a getelementptr with more than 2 operand =>
- // likely we can merge some computation if they are done on 64 bits.
- // 2. The beginning of the SExt chain is SExt several time. =>
- // code sharing is possible.
-
- bool insert = false;
- // #1.
- for (const User *U : SExt->users()) {
- const Instruction *Inst = dyn_cast<GetElementPtrInst>(U);
- if (Inst && Inst->getNumOperands() > 2) {
- DEBUG(dbgs() << "Interesting use in GetElementPtrInst\n" << *Inst
- << '\n');
- insert = true;
- break;
- }
- }
-
- // #2.
- // Check the head of the chain.
- Instruction *Inst = SExt;
- Value *Last;
- do {
- int OpdIdx = 0;
- const BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Inst);
- if (BinOp && isa<ConstantInt>(BinOp->getOperand(0)))
- OpdIdx = 1;
- Last = Inst->getOperand(OpdIdx);
- Inst = dyn_cast<Instruction>(Last);
- } while (Inst && canGetThrough(Inst) && shouldGetThrough(Inst));
-
- DEBUG(dbgs() << "Head of the chain:\n" << *Last << '\n');
- DenseMap<Value *, Instruction *>::iterator AlreadySeen =
- SeenChains.find(Last);
- if (insert || AlreadySeen != SeenChains.end()) {
- DEBUG(dbgs() << "Insert\n");
- SExtInsts.push_back(SExt);
- if (AlreadySeen != SeenChains.end() && AlreadySeen->second != nullptr) {
- DEBUG(dbgs() << "Insert chain member\n");
- SExtInsts.push_back(AlreadySeen->second);
- SeenChains[Last] = nullptr;
- }
- } else {
- DEBUG(dbgs() << "Record its chain membership\n");
- SeenChains[Last] = SExt;
- }
- }
- }
-}
-
-bool AArch64AddressTypePromotion::runOnFunction(Function &F) {
- if (skipFunction(F))
- return false;
-
- if (F.isDeclaration())
- return false;
- Func = &F;
- ConsideredSExtType = Type::getInt64Ty(Func->getContext());
-
- DEBUG(dbgs() << "*** " << getPassName() << ": " << Func->getName() << '\n');
-
- Instructions SExtInsts;
- analyzeSExtension(SExtInsts);
- return propagateSignExtension(SExtInsts);
-}
diff --git a/contrib/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/contrib/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index eb1bbcafe6e6..4b1bb27dce73 100644
--- a/contrib/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/contrib/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -758,6 +758,9 @@ void AArch64TargetLowering::addTypeForNEON(MVT VT, MVT PromotedBitwiseVT) {
setOperationAction(ISD::FP_TO_SINT, VT, Custom);
setOperationAction(ISD::FP_TO_UINT, VT, Custom);
+ if (!VT.isFloatingPoint())
+ setOperationAction(ISD::ABS, VT, Legal);
+
// [SU][MIN|MAX] are available for all NEON types apart from i64.
if (!VT.isFloatingPoint() && VT != MVT::v2i64 && VT != MVT::v1i64)
for (unsigned Opcode : {ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX})
@@ -2482,6 +2485,9 @@ SDValue AArch64TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
EVT PtrVT = getPointerTy(DAG.getDataLayout());
return DAG.getNode(AArch64ISD::THREAD_POINTER, dl, PtrVT);
}
+ case Intrinsic::aarch64_neon_abs:
+ return DAG.getNode(ISD::ABS, dl, Op.getValueType(),
+ Op.getOperand(1));
case Intrinsic::aarch64_neon_smax:
return DAG.getNode(ISD::SMAX, dl, Op.getValueType(),
Op.getOperand(1), Op.getOperand(2));
diff --git a/contrib/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/contrib/llvm/lib/Target/AArch64/AArch64InstrInfo.td
index ce401206e517..902b08844216 100644
--- a/contrib/llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/contrib/llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -2734,60 +2734,36 @@ defm FMOV : FPMoveImmediate<"fmov">;
defm UABDL : SIMDLongThreeVectorBHSabdl<1, 0b0111, "uabdl",
int_aarch64_neon_uabd>;
// Match UABDL in log2-shuffle patterns.
+def : Pat<(abs (v8i16 (sub (zext (v8i8 V64:$opA)),
+ (zext (v8i8 V64:$opB))))),
+ (UABDLv8i8_v8i16 V64:$opA, V64:$opB)>;
def : Pat<(xor (v8i16 (AArch64vashr v8i16:$src, (i32 15))),
(v8i16 (add (sub (zext (v8i8 V64:$opA)),
(zext (v8i8 V64:$opB))),
(AArch64vashr v8i16:$src, (i32 15))))),
(UABDLv8i8_v8i16 V64:$opA, V64:$opB)>;
+def : Pat<(abs (v8i16 (sub (zext (extract_high_v16i8 V128:$opA)),
+ (zext (extract_high_v16i8 V128:$opB))))),
+ (UABDLv16i8_v8i16 V128:$opA, V128:$opB)>;
def : Pat<(xor (v8i16 (AArch64vashr v8i16:$src, (i32 15))),
(v8i16 (add (sub (zext (extract_high_v16i8 V128:$opA)),
(zext (extract_high_v16i8 V128:$opB))),
(AArch64vashr v8i16:$src, (i32 15))))),
(UABDLv16i8_v8i16 V128:$opA, V128:$opB)>;
-def : Pat<(xor (v4i32 (AArch64vashr v4i32:$src, (i32 31))),
- (v4i32 (add (sub (zext (v4i16 V64:$opA)),
- (zext (v4i16 V64:$opB))),
- (AArch64vashr v4i32:$src, (i32 31))))),
+def : Pat<(abs (v4i32 (sub (zext (v4i16 V64:$opA)),
+ (zext (v4i16 V64:$opB))))),
(UABDLv4i16_v4i32 V64:$opA, V64:$opB)>;
-def : Pat<(xor (v4i32 (AArch64vashr v4i32:$src, (i32 31))),
- (v4i32 (add (sub (zext (extract_high_v8i16 V128:$opA)),
- (zext (extract_high_v8i16 V128:$opB))),
- (AArch64vashr v4i32:$src, (i32 31))))),
+def : Pat<(abs (v4i32 (sub (zext (extract_high_v8i16 V128:$opA)),
+ (zext (extract_high_v8i16 V128:$opB))))),
(UABDLv8i16_v4i32 V128:$opA, V128:$opB)>;
-def : Pat<(xor (v2i64 (AArch64vashr v2i64:$src, (i32 63))),
- (v2i64 (add (sub (zext (v2i32 V64:$opA)),
- (zext (v2i32 V64:$opB))),
- (AArch64vashr v2i64:$src, (i32 63))))),
+def : Pat<(abs (v2i64 (sub (zext (v2i32 V64:$opA)),
+ (zext (v2i32 V64:$opB))))),
(UABDLv2i32_v2i64 V64:$opA, V64:$opB)>;
-def : Pat<(xor (v2i64 (AArch64vashr v2i64:$src, (i32 63))),
- (v2i64 (add (sub (zext (extract_high_v4i32 V128:$opA)),
- (zext (extract_high_v4i32 V128:$opB))),
- (AArch64vashr v2i64:$src, (i32 63))))),
+def : Pat<(abs (v2i64 (sub (zext (extract_high_v4i32 V128:$opA)),
+ (zext (extract_high_v4i32 V128:$opB))))),
(UABDLv4i32_v2i64 V128:$opA, V128:$opB)>;
-defm ABS : SIMDTwoVectorBHSD<0, 0b01011, "abs", int_aarch64_neon_abs>;
-def : Pat<(xor (v8i8 (AArch64vashr V64:$src, (i32 7))),
- (v8i8 (add V64:$src, (AArch64vashr V64:$src, (i32 7))))),
- (ABSv8i8 V64:$src)>;
-def : Pat<(xor (v4i16 (AArch64vashr V64:$src, (i32 15))),
- (v4i16 (add V64:$src, (AArch64vashr V64:$src, (i32 15))))),
- (ABSv4i16 V64:$src)>;
-def : Pat<(xor (v2i32 (AArch64vashr V64:$src, (i32 31))),
- (v2i32 (add V64:$src, (AArch64vashr V64:$src, (i32 31))))),
- (ABSv2i32 V64:$src)>;
-def : Pat<(xor (v16i8 (AArch64vashr V128:$src, (i32 7))),
- (v16i8 (add V128:$src, (AArch64vashr V128:$src, (i32 7))))),
- (ABSv16i8 V128:$src)>;
-def : Pat<(xor (v8i16 (AArch64vashr V128:$src, (i32 15))),
- (v8i16 (add V128:$src, (AArch64vashr V128:$src, (i32 15))))),
- (ABSv8i16 V128:$src)>;
-def : Pat<(xor (v4i32 (AArch64vashr V128:$src, (i32 31))),
- (v4i32 (add V128:$src, (AArch64vashr V128:$src, (i32 31))))),
- (ABSv4i32 V128:$src)>;
-def : Pat<(xor (v2i64 (AArch64vashr V128:$src, (i32 63))),
- (v2i64 (add V128:$src, (AArch64vashr V128:$src, (i32 63))))),
- (ABSv2i64 V128:$src)>;
-
+defm ABS : SIMDTwoVectorBHSD<0, 0b01011, "abs", abs>;
defm CLS : SIMDTwoVectorBHS<0, 0b00100, "cls", int_aarch64_neon_cls>;
defm CLZ : SIMDTwoVectorBHS<1, 0b00100, "clz", ctlz>;
defm CMEQ : SIMDCmpTwoVector<0, 0b01001, "cmeq", AArch64cmeqz>;
@@ -3359,7 +3335,7 @@ def : Pat<(i64 (int_aarch64_neon_sqsub (i64 FPR64:$Rd),
// Advanced SIMD two scalar instructions.
//===----------------------------------------------------------------------===//
-defm ABS : SIMDTwoScalarD< 0, 0b01011, "abs", int_aarch64_neon_abs>;
+defm ABS : SIMDTwoScalarD< 0, 0b01011, "abs", abs>;
defm CMEQ : SIMDCmpTwoScalarD< 0, 0b01001, "cmeq", AArch64cmeqz>;
defm CMGE : SIMDCmpTwoScalarD< 1, 0b01000, "cmge", AArch64cmgez>;
defm CMGT : SIMDCmpTwoScalarD< 0, 0b01000, "cmgt", AArch64cmgtz>;
diff --git a/contrib/llvm/lib/Target/AArch64/AArch64RegisterBankInfo.cpp b/contrib/llvm/lib/Target/AArch64/AArch64RegisterBankInfo.cpp
index 6f9021c4a030..5f895903da6f 100644
--- a/contrib/llvm/lib/Target/AArch64/AArch64RegisterBankInfo.cpp
+++ b/contrib/llvm/lib/Target/AArch64/AArch64RegisterBankInfo.cpp
@@ -260,15 +260,15 @@ AArch64RegisterBankInfo::getInstrAlternativeMappings(
if (MI.getNumOperands() != 3)
break;
InstructionMappings AltMappings;
- InstructionMapping GPRMapping(
+ const InstructionMapping &GPRMapping = getInstructionMapping(
/*ID*/ 1, /*Cost*/ 1, getValueMapping(PMI_FirstGPR, Size),
/*NumOperands*/ 3);
- InstructionMapping FPRMapping(
+ const InstructionMapping &FPRMapping = getInstructionMapping(
/*ID*/ 2, /*Cost*/ 1, getValueMapping(PMI_FirstFPR, Size),
/*NumOperands*/ 3);
- AltMappings.emplace_back(std::move(GPRMapping));
- AltMappings.emplace_back(std::move(FPRMapping));
+ AltMappings.push_back(&GPRMapping);
+ AltMappings.push_back(&FPRMapping);
return AltMappings;
}
case TargetOpcode::G_BITCAST: {
@@ -282,29 +282,29 @@ AArch64RegisterBankInfo::getInstrAlternativeMappings(
break;
InstructionMappings AltMappings;
- InstructionMapping GPRMapping(
+ const InstructionMapping &GPRMapping = getInstructionMapping(
/*ID*/ 1, /*Cost*/ 1,
getCopyMapping(AArch64::GPRRegBankID, AArch64::GPRRegBankID, Size),
/*NumOperands*/ 2);
- InstructionMapping FPRMapping(
+ const InstructionMapping &FPRMapping = getInstructionMapping(
/*ID*/ 2, /*Cost*/ 1,
getCopyMapping(AArch64::FPRRegBankID, AArch64::FPRRegBankID, Size),
/*NumOperands*/ 2);
- InstructionMapping GPRToFPRMapping(
+ const InstructionMapping &GPRToFPRMapping = getInstructionMapping(
/*ID*/ 3,
/*Cost*/ copyCost(AArch64::GPRRegBank, AArch64::FPRRegBank, Size),
getCopyMapping(AArch64::FPRRegBankID, AArch64::GPRRegBankID, Size),
/*NumOperands*/ 2);
- InstructionMapping FPRToGPRMapping(
+ const InstructionMapping &FPRToGPRMapping = getInstructionMapping(
/*ID*/ 3,
/*Cost*/ copyCost(AArch64::GPRRegBank, AArch64::FPRRegBank, Size),
getCopyMapping(AArch64::GPRRegBankID, AArch64::FPRRegBankID, Size),
/*NumOperands*/ 2);
- AltMappings.emplace_back(std::move(GPRMapping));
- AltMappings.emplace_back(std::move(FPRMapping));
- AltMappings.emplace_back(std::move(GPRToFPRMapping));
- AltMappings.emplace_back(std::move(FPRToGPRMapping));
+ AltMappings.push_back(&GPRMapping);
+ AltMappings.push_back(&FPRMapping);
+ AltMappings.push_back(&GPRToFPRMapping);
+ AltMappings.push_back(&FPRToGPRMapping);
return AltMappings;
}
case TargetOpcode::G_LOAD: {
@@ -318,21 +318,21 @@ AArch64RegisterBankInfo::getInstrAlternativeMappings(
break;
InstructionMappings AltMappings;
- InstructionMapping GPRMapping(
+ const InstructionMapping &GPRMapping = getInstructionMapping(
/*ID*/ 1, /*Cost*/ 1,
getOperandsMapping({getValueMapping(PMI_FirstGPR, Size),
// Addresses are GPR 64-bit.
getValueMapping(PMI_FirstGPR, 64)}),
/*NumOperands*/ 2);
- InstructionMapping FPRMapping(
+ const InstructionMapping &FPRMapping = getInstructionMapping(
/*ID*/ 2, /*Cost*/ 1,
getOperandsMapping({getValueMapping(PMI_FirstFPR, Size),
// Addresses are GPR 64-bit.
getValueMapping(PMI_FirstGPR, 64)}),
/*NumOperands*/ 2);
- AltMappings.emplace_back(std::move(GPRMapping));
- AltMappings.emplace_back(std::move(FPRMapping));
+ AltMappings.push_back(&GPRMapping);
+ AltMappings.push_back(&FPRMapping);
return AltMappings;
}
default:
@@ -373,8 +373,9 @@ static bool isPreISelGenericFloatingPointOpcode(unsigned Opc) {
return false;
}
-RegisterBankInfo::InstructionMapping
-AArch64RegisterBankInfo::getSameKindOfOperandsMapping(const MachineInstr &MI) {
+const RegisterBankInfo::InstructionMapping &
+AArch64RegisterBankInfo::getSameKindOfOperandsMapping(
+ const MachineInstr &MI) const {
const unsigned Opc = MI.getOpcode();
const MachineFunction &MF = *MI.getParent()->getParent();
const MachineRegisterInfo &MRI = MF.getRegInfo();
@@ -411,11 +412,11 @@ AArch64RegisterBankInfo::getSameKindOfOperandsMapping(const MachineInstr &MI) {
}
#endif // End NDEBUG.
- return InstructionMapping{DefaultMappingID, 1, getValueMapping(RBIdx, Size),
- NumOperands};
+ return getInstructionMapping(DefaultMappingID, 1,
+ getValueMapping(RBIdx, Size), NumOperands);
}
-RegisterBankInfo::InstructionMapping
+const RegisterBankInfo::InstructionMapping &
AArch64RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
const unsigned Opc = MI.getOpcode();
const MachineFunction &MF = *MI.getParent()->getParent();
@@ -424,7 +425,8 @@ AArch64RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
// Try the default logic for non-generic instructions that are either copies
// or already have some operands assigned to banks.
if (!isPreISelGenericOpcode(Opc)) {
- RegisterBankInfo::InstructionMapping Mapping = getInstrMappingImpl(MI);
+ const RegisterBankInfo::InstructionMapping &Mapping =
+ getInstrMappingImpl(MI);
if (Mapping.isValid())
return Mapping;
}
@@ -462,15 +464,15 @@ AArch64RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
DstIsGPR ? AArch64::GPRRegBank : AArch64::FPRRegBank;
const RegisterBank &SrcRB =
SrcIsGPR ? AArch64::GPRRegBank : AArch64::FPRRegBank;
- return InstructionMapping{
+ return getInstructionMapping(
DefaultMappingID, copyCost(DstRB, SrcRB, Size),
getCopyMapping(DstRB.getID(), SrcRB.getID(), Size),
- /*NumOperands*/ 2};
+ /*NumOperands*/ 2);
}
case TargetOpcode::G_SEQUENCE:
// FIXME: support this, but the generic code is really not going to do
// anything sane.
- return InstructionMapping();
+ return getInvalidInstructionMapping();
default:
break;
}
@@ -533,19 +535,17 @@ AArch64RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
}
// Finally construct the computed mapping.
- RegisterBankInfo::InstructionMapping Mapping =
- InstructionMapping{DefaultMappingID, Cost, nullptr, NumOperands};
SmallVector<const ValueMapping *, 8> OpdsMapping(NumOperands);
for (unsigned Idx = 0; Idx < NumOperands; ++Idx) {
if (MI.getOperand(Idx).isReg() && MI.getOperand(Idx).getReg()) {
auto Mapping = getValueMapping(OpRegBankIdx[Idx], OpSize[Idx]);
if (!Mapping->isValid())
- return InstructionMapping();
+ return getInvalidInstructionMapping();
OpdsMapping[Idx] = Mapping;
}
}
- Mapping.setOperandsMapping(getOperandsMapping(OpdsMapping));
- return Mapping;
+ return getInstructionMapping(DefaultMappingID, Cost,
+ getOperandsMapping(OpdsMapping), NumOperands);
}
diff --git a/contrib/llvm/lib/Target/AArch64/AArch64RegisterBankInfo.h b/contrib/llvm/lib/Target/AArch64/AArch64RegisterBankInfo.h
index 0a795a42c0b1..6d74a47095a9 100644
--- a/contrib/llvm/lib/Target/AArch64/AArch64RegisterBankInfo.h
+++ b/contrib/llvm/lib/Target/AArch64/AArch64RegisterBankInfo.h
@@ -98,8 +98,8 @@ class AArch64RegisterBankInfo final : public AArch64GenRegisterBankInfo {
///
/// \return An InstructionMappings with a statically allocated
/// OperandsMapping.
- static InstructionMapping
- getSameKindOfOperandsMapping(const MachineInstr &MI);
+ const InstructionMapping &
+ getSameKindOfOperandsMapping(const MachineInstr &MI) const;
public:
AArch64RegisterBankInfo(const TargetRegisterInfo &TRI);
@@ -113,7 +113,8 @@ public:
InstructionMappings
getInstrAlternativeMappings(const MachineInstr &MI) const override;
- InstructionMapping getInstrMapping(const MachineInstr &MI) const override;
+ const InstructionMapping &
+ getInstrMapping(const MachineInstr &MI) const override;
};
} // End llvm namespace.
#endif
diff --git a/contrib/llvm/lib/Target/AArch64/AArch64TargetMachine.cpp b/contrib/llvm/lib/Target/AArch64/AArch64TargetMachine.cpp
index de7108d302dd..5a90fd1eb1ba 100644
--- a/contrib/llvm/lib/Target/AArch64/AArch64TargetMachine.cpp
+++ b/contrib/llvm/lib/Target/AArch64/AArch64TargetMachine.cpp
@@ -109,11 +109,6 @@ EnableA53Fix835769("aarch64-fix-cortex-a53-835769", cl::Hidden,
cl::init(false));
static cl::opt<bool>
- EnableAddressTypePromotion("aarch64-enable-type-promotion", cl::Hidden,
- cl::desc("Enable the type promotion pass"),
- cl::init(false));
-
-static cl::opt<bool>
EnableGEPOpt("aarch64-enable-gep-opt", cl::Hidden,
cl::desc("Enable optimizations on complex GEPs"),
cl::init(false));
@@ -146,7 +141,6 @@ extern "C" void LLVMInitializeAArch64Target() {
initializeGlobalISel(*PR);
initializeAArch64A53Fix835769Pass(*PR);
initializeAArch64A57FPLoadBalancingPass(*PR);
- initializeAArch64AddressTypePromotionPass(*PR);
initializeAArch64AdvSIMDScalarPass(*PR);
initializeAArch64CollectLOHPass(*PR);
initializeAArch64ConditionalComparesPass(*PR);
@@ -382,9 +376,6 @@ bool AArch64PassConfig::addPreISel() {
addPass(createGlobalMergePass(TM, 4095, OnlyOptimizeForSize));
}
- if (TM->getOptLevel() != CodeGenOpt::None && EnableAddressTypePromotion)
- addPass(createAArch64AddressTypePromotionPass());
-
return false;
}
diff --git a/contrib/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp b/contrib/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
index 2ce23dbf08e6..f473944cd528 100644
--- a/contrib/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
+++ b/contrib/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
@@ -713,7 +713,8 @@ void AMDGPUAsmPrinter::getSIProgramInfo(SIProgramInfo &ProgInfo,
S_00B84C_TG_SIZE_EN(MFI->hasWorkGroupInfo()) |
S_00B84C_TIDIG_COMP_CNT(TIDIGCompCnt) |
S_00B84C_EXCP_EN_MSB(0) |
- S_00B84C_LDS_SIZE(ProgInfo.LDSBlocks) |
+ // For AMDHSA, LDS_SIZE must be zero, as it is populated by the CP.
+ S_00B84C_LDS_SIZE(STM.isAmdHsaOS() ? 0 : ProgInfo.LDSBlocks) |
S_00B84C_EXCP_EN(0);
}
diff --git a/contrib/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/contrib/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index 64e1b8f0d7f0..915d1d9e0e68 100644
--- a/contrib/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/contrib/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -3580,7 +3580,7 @@ void AMDGPUTargetLowering::computeKnownBitsForTargetNode(
const SDValue Op, KnownBits &Known,
const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth) const {
- Known.Zero.clearAllBits(); Known.One.clearAllBits(); // Don't know anything.
+ Known.resetAll(); // Don't know anything.
KnownBits Known2;
unsigned Opc = Op.getOpcode();
diff --git a/contrib/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp b/contrib/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
index a5edc0c3b937..623b2c88ab8f 100644
--- a/contrib/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
+++ b/contrib/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
@@ -82,25 +82,28 @@ AMDGPURegisterBankInfo::getInstrAlternativeMappings(
switch (MI.getOpcode()) {
case TargetOpcode::G_LOAD: {
// FIXME: Should we be hard coding the size for these mappings?
- InstructionMapping SSMapping(1, 1,
- getOperandsMapping({AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, Size),
- AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, 64)}),
- 2); // Num Operands
- AltMappings.emplace_back(std::move(SSMapping));
-
- InstructionMapping VVMapping(2, 1,
- getOperandsMapping({AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, Size),
- AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, 64)}),
- 2); // Num Operands
- AltMappings.emplace_back(std::move(VVMapping));
+ const InstructionMapping &SSMapping = getInstructionMapping(
+ 1, 1, getOperandsMapping(
+ {AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, Size),
+ AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, 64)}),
+ 2); // Num Operands
+ AltMappings.push_back(&SSMapping);
+
+ const InstructionMapping &VVMapping = getInstructionMapping(
+ 2, 1, getOperandsMapping(
+ {AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, Size),
+ AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, 64)}),
+ 2); // Num Operands
+ AltMappings.push_back(&VVMapping);
// FIXME: Should this be the pointer-size (64-bits) or the size of the
// register that will hold the bufffer resourc (128-bits).
- InstructionMapping VSMapping(3, 1,
- getOperandsMapping({AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, Size),
- AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, 64)}),
- 2); // Num Operands
- AltMappings.emplace_back(std::move(VSMapping));
+ const InstructionMapping &VSMapping = getInstructionMapping(
+ 3, 1, getOperandsMapping(
+ {AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, Size),
+ AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, 64)}),
+ 2); // Num Operands
+ AltMappings.push_back(&VSMapping);
return AltMappings;
@@ -124,13 +127,11 @@ static bool isInstrUniform(const MachineInstr &MI) {
return AMDGPU::isUniformMMO(MMO);
}
-RegisterBankInfo::InstructionMapping
+const RegisterBankInfo::InstructionMapping &
AMDGPURegisterBankInfo::getInstrMappingForLoad(const MachineInstr &MI) const {
const MachineFunction &MF = *MI.getParent()->getParent();
const MachineRegisterInfo &MRI = MF.getRegInfo();
- RegisterBankInfo::InstructionMapping Mapping =
- InstructionMapping{1, 1, nullptr, MI.getNumOperands()};
SmallVector<const ValueMapping*, 8> OpdsMapping(MI.getNumOperands());
unsigned Size = getSizeInBits(MI.getOperand(0).getReg(), MRI, *TRI);
unsigned PtrSize = getSizeInBits(MI.getOperand(1).getReg(), MRI, *TRI);
@@ -150,32 +151,34 @@ AMDGPURegisterBankInfo::getInstrMappingForLoad(const MachineInstr &MI) const {
OpdsMapping[0] = ValMapping;
OpdsMapping[1] = PtrMapping;
- Mapping.setOperandsMapping(getOperandsMapping(OpdsMapping));
+ const RegisterBankInfo::InstructionMapping &Mapping = getInstructionMapping(
+ 1, 1, getOperandsMapping(OpdsMapping), MI.getNumOperands());
return Mapping;
// FIXME: Do we want to add a mapping for FLAT load, or should we just
// handle that during instruction selection?
}
-RegisterBankInfo::InstructionMapping
+const RegisterBankInfo::InstructionMapping &
AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
- RegisterBankInfo::InstructionMapping Mapping = getInstrMappingImpl(MI);
+ const RegisterBankInfo::InstructionMapping &Mapping = getInstrMappingImpl(MI);
if (Mapping.isValid())
return Mapping;
const MachineFunction &MF = *MI.getParent()->getParent();
const MachineRegisterInfo &MRI = MF.getRegInfo();
- Mapping = InstructionMapping{1, 1, nullptr, MI.getNumOperands()};
SmallVector<const ValueMapping*, 8> OpdsMapping(MI.getNumOperands());
+ bool IsComplete = true;
switch (MI.getOpcode()) {
- default: break;
+ default:
+ IsComplete = false;
+ break;
case AMDGPU::G_CONSTANT: {
unsigned Size = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
OpdsMapping[0] = AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, Size);
- Mapping.setOperandsMapping(getOperandsMapping(OpdsMapping));
- return Mapping;
+ break;
}
case AMDGPU::G_GEP: {
for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
@@ -185,8 +188,7 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
unsigned Size = MRI.getType(MI.getOperand(i).getReg()).getSizeInBits();
OpdsMapping[i] = AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, Size);
}
- Mapping.setOperandsMapping(getOperandsMapping(OpdsMapping));
- return Mapping;
+ break;
}
case AMDGPU::G_STORE: {
assert(MI.getOperand(0).isReg());
@@ -203,28 +205,27 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
OpdsMapping[0] = ValMapping;
OpdsMapping[1] = PtrMapping;
- Mapping.setOperandsMapping(getOperandsMapping(OpdsMapping));
- return Mapping;
+ break;
}
case AMDGPU::G_LOAD:
return getInstrMappingForLoad(MI);
}
- unsigned BankID = AMDGPU::SGPRRegBankID;
-
- Mapping = InstructionMapping{1, 1, nullptr, MI.getNumOperands()};
- unsigned Size = 0;
- for (unsigned Idx = 0; Idx < MI.getNumOperands(); ++Idx) {
- // If the operand is not a register default to the size of the previous
- // operand.
- // FIXME: Can't we pull the types from the MachineInstr rather than the
- // operands.
- if (MI.getOperand(Idx).isReg())
- Size = getSizeInBits(MI.getOperand(Idx).getReg(), MRI, *TRI);
- OpdsMapping.push_back(AMDGPU::getValueMapping(BankID, Size));
+ if (!IsComplete) {
+ unsigned BankID = AMDGPU::SGPRRegBankID;
+
+ unsigned Size = 0;
+ for (unsigned Idx = 0; Idx < MI.getNumOperands(); ++Idx) {
+ // If the operand is not a register default to the size of the previous
+ // operand.
+ // FIXME: Can't we pull the types from the MachineInstr rather than the
+ // operands.
+ if (MI.getOperand(Idx).isReg())
+ Size = getSizeInBits(MI.getOperand(Idx).getReg(), MRI, *TRI);
+ OpdsMapping.push_back(AMDGPU::getValueMapping(BankID, Size));
+ }
}
- Mapping.setOperandsMapping(getOperandsMapping(OpdsMapping));
-
- return Mapping;
+ return getInstructionMapping(1, 1, getOperandsMapping(OpdsMapping),
+ MI.getNumOperands());
}
diff --git a/contrib/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.h b/contrib/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.h
index f13bde87ef2d..7c198a1b8a3f 100644
--- a/contrib/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.h
+++ b/contrib/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.h
@@ -44,7 +44,7 @@ class AMDGPURegisterBankInfo : public AMDGPUGenRegisterBankInfo {
/// See RegisterBankInfo::applyMapping.
void applyMappingImpl(const OperandsMapper &OpdMapper) const override;
- RegisterBankInfo::InstructionMapping
+ const RegisterBankInfo::InstructionMapping &
getInstrMappingForLoad(const MachineInstr &MI) const;
public:
@@ -59,7 +59,8 @@ public:
InstructionMappings
getInstrAlternativeMappings(const MachineInstr &MI) const override;
- InstructionMapping getInstrMapping(const MachineInstr &MI) const override;
+ const InstructionMapping &
+ getInstrMapping(const MachineInstr &MI) const override;
};
} // End llvm namespace.
#endif
diff --git a/contrib/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp b/contrib/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp
index 86e3b37b09e9..1279f845de0e 100644
--- a/contrib/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp
+++ b/contrib/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp
@@ -353,7 +353,8 @@ void SIFrameLowering::emitPrologue(MachineFunction &MF,
if (OffsetRegUsed &&
PreloadedScratchWaveOffsetReg != ScratchWaveOffsetReg) {
BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), ScratchWaveOffsetReg)
- .addReg(PreloadedScratchWaveOffsetReg, RegState::Kill);
+ .addReg(PreloadedScratchWaveOffsetReg,
+ MRI.isPhysRegUsed(ScratchWaveOffsetReg) ? 0 : RegState::Kill);
}
if (CopyBuffer && !CopyBufferFirst) {
diff --git a/contrib/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/contrib/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 853c8737b464..cc93c27731ff 100644
--- a/contrib/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/contrib/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -1042,6 +1042,7 @@ static void allocateHSAUserSGPRs(CCState &CCInfo,
static void allocateSystemSGPRs(CCState &CCInfo,
MachineFunction &MF,
SIMachineFunctionInfo &Info,
+ CallingConv::ID CallConv,
bool IsShader) {
if (Info.hasWorkGroupIDX()) {
unsigned Reg = Info.addWorkGroupIDX();
@@ -1072,8 +1073,15 @@ static void allocateSystemSGPRs(CCState &CCInfo,
unsigned PrivateSegmentWaveByteOffsetReg;
if (IsShader) {
- PrivateSegmentWaveByteOffsetReg = findFirstFreeSGPR(CCInfo);
- Info.setPrivateSegmentWaveByteOffset(PrivateSegmentWaveByteOffsetReg);
+ PrivateSegmentWaveByteOffsetReg =
+ Info.getPrivateSegmentWaveByteOffsetSystemSGPR();
+
+ // This is true if the scratch wave byte offset doesn't have a fixed
+ // location.
+ if (PrivateSegmentWaveByteOffsetReg == AMDGPU::NoRegister) {
+ PrivateSegmentWaveByteOffsetReg = findFirstFreeSGPR(CCInfo);
+ Info.setPrivateSegmentWaveByteOffset(PrivateSegmentWaveByteOffsetReg);
+ }
} else
PrivateSegmentWaveByteOffsetReg = Info.addPrivateSegmentWaveByteOffset();
@@ -1310,7 +1318,7 @@ SDValue SITargetLowering::LowerFormalArguments(
// Start adding system SGPRs.
if (IsEntryFunc)
- allocateSystemSGPRs(CCInfo, MF, *Info, IsShader);
+ allocateSystemSGPRs(CCInfo, MF, *Info, CallConv, IsShader);
reservePrivateMemoryRegs(getTargetMachine(), MF, *TRI, *Info);
diff --git a/contrib/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp b/contrib/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
index 9122cd72d323..b5e3ce3dfe3e 100644
--- a/contrib/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
+++ b/contrib/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
@@ -1087,7 +1087,7 @@ MachineInstr *SIInsertWaitcnts::generateSWaitCntInstBefore(
(CntVal[LGKM_CNT] & AMDGPU::getLgkmcntBitMask(IV)))) {
MachineLoop *ContainingLoop = MLI->getLoopFor(MI.getParent());
if (ContainingLoop) {
- MachineBasicBlock *TBB = ContainingLoop->getTopBlock();
+ MachineBasicBlock *TBB = ContainingLoop->getHeader();
BlockWaitcntBrackets *ScoreBracket =
BlockWaitcntBracketsMap[TBB].get();
if (!ScoreBracket) {
@@ -1097,7 +1097,7 @@ MachineInstr *SIInsertWaitcnts::generateSWaitCntInstBefore(
}
ScoreBracket->setRevisitLoop(true);
DEBUG(dbgs() << "set-revisit: block"
- << ContainingLoop->getTopBlock()->getNumber() << '\n';);
+ << ContainingLoop->getHeader()->getNumber() << '\n';);
}
}
@@ -1758,12 +1758,12 @@ bool SIInsertWaitcnts::runOnMachineFunction(MachineFunction &MF) {
// If we are walking into the block from before the loop, then guarantee
// at least 1 re-walk over the loop to propagate the information, even if
// no S_WAITCNT instructions were generated.
- if (ContainingLoop && ContainingLoop->getTopBlock() == &MBB && J < I &&
+ if (ContainingLoop && ContainingLoop->getHeader() == &MBB && J < I &&
(BlockWaitcntProcessedSet.find(&MBB) ==
BlockWaitcntProcessedSet.end())) {
BlockWaitcntBracketsMap[&MBB]->setRevisitLoop(true);
DEBUG(dbgs() << "set-revisit: block"
- << ContainingLoop->getTopBlock()->getNumber() << '\n';);
+ << ContainingLoop->getHeader()->getNumber() << '\n';);
}
// Walk over the instructions.
@@ -1774,7 +1774,7 @@ bool SIInsertWaitcnts::runOnMachineFunction(MachineFunction &MF) {
// See if we want to revisit the loop.
if (ContainingLoop && loopBottom(ContainingLoop) == &MBB) {
- MachineBasicBlock *EntryBB = ContainingLoop->getTopBlock();
+ MachineBasicBlock *EntryBB = ContainingLoop->getHeader();
BlockWaitcntBrackets *EntrySB = BlockWaitcntBracketsMap[EntryBB].get();
if (EntrySB && EntrySB->getRevisitLoop()) {
EntrySB->setRevisitLoop(false);
diff --git a/contrib/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp b/contrib/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
index b6a982aee6be..adebb8c4a1c5 100644
--- a/contrib/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
+++ b/contrib/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
@@ -122,9 +122,15 @@ SIMachineFunctionInfo::SIMachineFunctionInfo(const MachineFunction &MF)
bool MaySpill = ST.isVGPRSpillingEnabled(*F);
bool HasStackObjects = FrameInfo.hasStackObjects();
- if (HasStackObjects || MaySpill)
+ if (HasStackObjects || MaySpill) {
PrivateSegmentWaveByteOffset = true;
+ // HS and GS always have the scratch wave offset in SGPR5 on GFX9.
+ if (ST.getGeneration() >= AMDGPUSubtarget::GFX9 &&
+ (CC == CallingConv::AMDGPU_HS || CC == CallingConv::AMDGPU_GS))
+ PrivateSegmentWaveByteOffsetSystemSGPR = AMDGPU::SGPR5;
+ }
+
if (ST.isAmdCodeObjectV2(MF)) {
if (HasStackObjects || MaySpill)
PrivateSegmentBuffer = true;
diff --git a/contrib/llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp b/contrib/llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp
index a20887564f44..b18ed509ed23 100644
--- a/contrib/llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp
@@ -245,11 +245,18 @@ ARMBaseRegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
switch (RC->getID()) {
default:
return 0;
- case ARM::tGPRRegClassID:
- return TFI->hasFP(MF) ? 4 : 5;
+ case ARM::tGPRRegClassID: {
+ // hasFP ends up calling getMaxCallFrameComputed() which may not be
+ // available when getPressureLimit() is called as part of
+ // ScheduleDAGRRList.
+ bool HasFP = MF.getFrameInfo().isMaxCallFrameSizeComputed()
+ ? TFI->hasFP(MF) : true;
+ return 5 - HasFP;
+ }
case ARM::GPRRegClassID: {
- unsigned FP = TFI->hasFP(MF) ? 1 : 0;
- return 10 - FP - (STI.isR9Reserved() ? 1 : 0);
+ bool HasFP = MF.getFrameInfo().isMaxCallFrameSizeComputed()
+ ? TFI->hasFP(MF) : true;
+ return 10 - HasFP - (STI.isR9Reserved() ? 1 : 0);
}
case ARM::SPRRegClassID: // Currently not used as 'rep' register class.
case ARM::DPRRegClassID:
diff --git a/contrib/llvm/lib/Target/ARM/ARMISelLowering.cpp b/contrib/llvm/lib/Target/ARM/ARMISelLowering.cpp
index 9f7e60a848d9..e64582402fe1 100644
--- a/contrib/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -202,7 +202,7 @@ void ARMTargetLowering::addTypeForNEON(MVT VT, MVT PromotedLdStVT,
if (!VT.isFloatingPoint() &&
VT != MVT::v2i64 && VT != MVT::v1i64)
- for (unsigned Opcode : {ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX})
+ for (auto Opcode : {ISD::ABS, ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX})
setOperationAction(Opcode, VT, Legal);
}
@@ -822,6 +822,7 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
setOperationAction(ISD::SRL, MVT::i64, Custom);
setOperationAction(ISD::SRA, MVT::i64, Custom);
+ setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom);
setOperationAction(ISD::ADDC, MVT::i32, Custom);
setOperationAction(ISD::ADDE, MVT::i32, Custom);
@@ -1344,6 +1345,10 @@ const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
case ARMISD::SMLALTT: return "ARMISD::SMLALTT";
case ARMISD::SMULWB: return "ARMISD::SMULWB";
case ARMISD::SMULWT: return "ARMISD::SMULWT";
+ case ARMISD::SMLALD: return "ARMISD::SMLALD";
+ case ARMISD::SMLALDX: return "ARMISD::SMLALDX";
+ case ARMISD::SMLSLD: return "ARMISD::SMLSLD";
+ case ARMISD::SMLSLDX: return "ARMISD::SMLSLDX";
case ARMISD::BUILD_VECTOR: return "ARMISD::BUILD_VECTOR";
case ARMISD::BFI: return "ARMISD::BFI";
case ARMISD::VORRIMM: return "ARMISD::VORRIMM";
@@ -3311,6 +3316,9 @@ ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
}
return Result;
}
+ case Intrinsic::arm_neon_vabs:
+ return DAG.getNode(ISD::ABS, SDLoc(Op), Op.getValueType(),
+ Op.getOperand(1));
case Intrinsic::arm_neon_vmulls:
case Intrinsic::arm_neon_vmullu: {
unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmulls)
@@ -7722,6 +7730,37 @@ SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
}
}
+static void ReplaceLongIntrinsic(SDNode *N, SmallVectorImpl<SDValue> &Results,
+ SelectionDAG &DAG) {
+ unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
+ unsigned Opc = 0;
+ if (IntNo == Intrinsic::arm_smlald)
+ Opc = ARMISD::SMLALD;
+ else if (IntNo == Intrinsic::arm_smlaldx)
+ Opc = ARMISD::SMLALDX;
+ else if (IntNo == Intrinsic::arm_smlsld)
+ Opc = ARMISD::SMLSLD;
+ else if (IntNo == Intrinsic::arm_smlsldx)
+ Opc = ARMISD::SMLSLDX;
+ else
+ return;
+
+ SDLoc dl(N);
+ SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
+ N->getOperand(3),
+ DAG.getConstant(0, dl, MVT::i32));
+ SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
+ N->getOperand(3),
+ DAG.getConstant(1, dl, MVT::i32));
+
+ SDValue LongMul = DAG.getNode(Opc, dl,
+ DAG.getVTList(MVT::i32, MVT::i32),
+ N->getOperand(1), N->getOperand(2),
+ Lo, Hi);
+ Results.push_back(LongMul.getValue(0));
+ Results.push_back(LongMul.getValue(1));
+}
+
/// ReplaceNodeResults - Replace the results of node with an illegal result
/// type with new values built out of custom code.
void ARMTargetLowering::ReplaceNodeResults(SDNode *N,
@@ -7763,6 +7802,8 @@ void ARMTargetLowering::ReplaceNodeResults(SDNode *N,
case ISD::ATOMIC_CMP_SWAP:
ReplaceCMP_SWAP_64Results(N, Results, DAG);
return;
+ case ISD::INTRINSIC_WO_CHAIN:
+ return ReplaceLongIntrinsic(N, Results, DAG);
}
if (Res.getNode())
Results.push_back(Res);
@@ -12602,7 +12643,7 @@ void ARMTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
const SelectionDAG &DAG,
unsigned Depth) const {
unsigned BitWidth = Known.getBitWidth();
- Known.Zero.clearAllBits(); Known.One.clearAllBits();
+ Known.resetAll();
switch (Op.getOpcode()) {
default: break;
case ARMISD::ADDC:
@@ -12617,7 +12658,8 @@ void ARMTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
case ARMISD::CMOV: {
// Bits are known zero/one if known on the LHS and RHS.
DAG.computeKnownBits(Op.getOperand(0), Known, Depth+1);
- if (Known.Zero == 0 && Known.One == 0) return;
+ if (Known.isUnknown())
+ return;
KnownBits KnownRHS;
DAG.computeKnownBits(Op.getOperand(1), KnownRHS, Depth+1);
@@ -14015,3 +14057,8 @@ void ARMTargetLowering::insertCopiesSplitCSR(
.addReg(NewVR);
}
}
+
+void ARMTargetLowering::finalizeLowering(MachineFunction &MF) const {
+ MF.getFrameInfo().computeMaxCallFrameSize(MF);
+ TargetLoweringBase::finalizeLowering(MF);
+}
diff --git a/contrib/llvm/lib/Target/ARM/ARMISelLowering.h b/contrib/llvm/lib/Target/ARM/ARMISelLowering.h
index 76e4b60e01fb..08c51b66dfe7 100644
--- a/contrib/llvm/lib/Target/ARM/ARMISelLowering.h
+++ b/contrib/llvm/lib/Target/ARM/ARMISelLowering.h
@@ -184,6 +184,10 @@ class InstrItineraryData;
SMLALBT, // 64-bit signed accumulate multiply bottom, top 16
SMLALTB, // 64-bit signed accumulate multiply top, bottom 16
SMLALTT, // 64-bit signed accumulate multiply top, top 16
+ SMLALD, // Signed multiply accumulate long dual
+ SMLALDX, // Signed multiply accumulate long dual exchange
+ SMLSLD, // Signed multiply subtract long dual
+ SMLSLDX, // Signed multiply subtract long dual exchange
// Operands of the standard BUILD_VECTOR node are not legalized, which
// is fine if BUILD_VECTORs are always lowered to shuffles or other
@@ -540,6 +544,8 @@ class InstrItineraryData;
unsigned getNumInterleavedAccesses(VectorType *VecTy,
const DataLayout &DL) const;
+ void finalizeLowering(MachineFunction &MF) const override;
+
protected:
std::pair<const TargetRegisterClass *, uint8_t>
findRepresentativeClass(const TargetRegisterInfo *TRI,
diff --git a/contrib/llvm/lib/Target/ARM/ARMInstrInfo.td b/contrib/llvm/lib/Target/ARM/ARMInstrInfo.td
index 28eb5fc30864..a94d6048f02d 100644
--- a/contrib/llvm/lib/Target/ARM/ARMInstrInfo.td
+++ b/contrib/llvm/lib/Target/ARM/ARMInstrInfo.td
@@ -99,6 +99,11 @@ def SDT_LongMac : SDTypeProfile<2, 4, [SDTCisVT<0, i32>,
SDTCisSameAs<0, 4>,
SDTCisSameAs<0, 5>]>;
+def ARMSmlald : SDNode<"ARMISD::SMLALD", SDT_LongMac>;
+def ARMSmlaldx : SDNode<"ARMISD::SMLALDX", SDT_LongMac>;
+def ARMSmlsld : SDNode<"ARMISD::SMLSLD", SDT_LongMac>;
+def ARMSmlsldx : SDNode<"ARMISD::SMLSLDX", SDT_LongMac>;
+
// Node definitions.
def ARMWrapper : SDNode<"ARMISD::Wrapper", SDTIntUnaryOp>;
def ARMWrapperPIC : SDNode<"ARMISD::WrapperPIC", SDTIntUnaryOp>;
@@ -870,7 +875,9 @@ def imm1_16_XFORM: SDNodeXForm<imm, [{
MVT::i32);
}]>;
def Imm1_16AsmOperand: ImmAsmOperandMinusOne<1,16> { let Name = "Imm1_16"; }
-def imm1_16 : Operand<i32>, PatLeaf<(imm), [{ return Imm > 0 && Imm <= 16; }],
+def imm1_16 : Operand<i32>, ImmLeaf<i32, [{
+ return Imm > 0 && Imm <= 16;
+ }],
imm1_16_XFORM> {
let PrintMethod = "printImmPlusOneOperand";
let ParserMatchClass = Imm1_16AsmOperand;
@@ -1983,7 +1990,9 @@ def : InstAlias<"sevl$p", (HINT 5, pred:$p)>, Requires<[IsARM, HasV8]>;
def : InstAlias<"esb$p", (HINT 16, pred:$p)>, Requires<[IsARM, HasRAS]>;
def SEL : AI<(outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm), DPFrm, NoItinerary, "sel",
- "\t$Rd, $Rn, $Rm", []>, Requires<[IsARM, HasV6]> {
+ "\t$Rd, $Rn, $Rm",
+ [(set GPR:$Rd, (int_arm_sel GPR:$Rn, GPR:$Rm))]>,
+ Requires<[IsARM, HasV6]> {
bits<4> Rd;
bits<4> Rn;
bits<4> Rm;
@@ -3472,8 +3481,12 @@ def : ARMV6Pat<(add rGPR:$Rn, (sext_inreg (srl rGPR:$Rm, imm8_or_16:$rot),
(SXTAH rGPR:$Rn, rGPR:$Rm, rot_imm:$rot)>;
def SXTB16 : AI_ext_rrot_np<0b01101000, "sxtb16">;
+def : ARMV6Pat<(int_arm_sxtb16 GPR:$Src),
+ (SXTB16 GPR:$Src, 0)>;
def SXTAB16 : AI_exta_rrot_np<0b01101000, "sxtab16">;
+def : ARMV6Pat<(int_arm_sxtab16 GPR:$LHS, GPR:$RHS),
+ (SXTAB16 GPR:$LHS, GPR:$RHS, 0)>;
// Zero extenders
@@ -3493,6 +3506,8 @@ def UXTB16 : AI_ext_rrot<0b01101100,
// (UXTB16r_rot GPR:$Src, 3)>;
def : ARMV6Pat<(and (srl GPR:$Src, (i32 8)), 0xFF00FF),
(UXTB16 GPR:$Src, 1)>;
+def : ARMV6Pat<(int_arm_uxtb16 GPR:$Src),
+ (UXTB16 GPR:$Src, 0)>;
def UXTAB : AI_exta_rrot<0b01101110, "uxtab",
BinOpFrag<(add node:$LHS, (and node:$RHS, 0x00FF))>>;
@@ -3507,6 +3522,8 @@ def : ARMV6Pat<(add rGPR:$Rn, (and (srl rGPR:$Rm, imm8_or_16:$rot), 0xFFFF)),
// This isn't safe in general, the add is two 16-bit units, not a 32-bit add.
def UXTAB16 : AI_exta_rrot_np<0b01101100, "uxtab16">;
+def : ARMV6Pat<(int_arm_uxtab16 GPR:$LHS, GPR:$RHS),
+ (UXTAB16 GPR:$LHS, GPR:$RHS, 0)>;
def SBFX : I<(outs GPRnopc:$Rd),
@@ -3633,71 +3650,85 @@ class AAI<bits<8> op27_20, bits<8> op11_4, string opc,
let Unpredictable{11-8} = 0b1111;
}
-// Saturating add/subtract
+// Wrappers around the AAI class
+class AAIRevOpr<bits<8> op27_20, bits<8> op11_4, string opc,
+ list<dag> pattern = []>
+ : AAI<op27_20, op11_4, opc,
+ pattern,
+ (ins GPRnopc:$Rm, GPRnopc:$Rn),
+ "\t$Rd, $Rm, $Rn">;
+class AAIIntrinsic<bits<8> op27_20, bits<8> op11_4, string opc,
+ Intrinsic intrinsic>
+ : AAI<op27_20, op11_4, opc,
+ [(set GPRnopc:$Rd, (intrinsic GPRnopc:$Rn, GPRnopc:$Rm))]>;
+
+// Saturating add/subtract
+let hasSideEffects = 1 in {
+def QADD8 : AAIIntrinsic<0b01100010, 0b11111001, "qadd8", int_arm_qadd8>;
+def QADD16 : AAIIntrinsic<0b01100010, 0b11110001, "qadd16", int_arm_qadd16>;
+def QSUB16 : AAIIntrinsic<0b01100010, 0b11110111, "qsub16", int_arm_qsub16>;
+def QSUB8 : AAIIntrinsic<0b01100010, 0b11111111, "qsub8", int_arm_qsub8>;
+
+def QDADD : AAIRevOpr<0b00010100, 0b00000101, "qdadd",
+ [(set GPRnopc:$Rd, (int_arm_qadd (int_arm_qadd GPRnopc:$Rm,
+ GPRnopc:$Rm),
+ GPRnopc:$Rn))]>;
+def QDSUB : AAIRevOpr<0b00010110, 0b00000101, "qdsub",
+ [(set GPRnopc:$Rd, (int_arm_qsub GPRnopc:$Rm,
+ (int_arm_qadd GPRnopc:$Rn, GPRnopc:$Rn)))]>;
+def QSUB : AAIRevOpr<0b00010010, 0b00000101, "qsub",
+ [(set GPRnopc:$Rd, (int_arm_qsub GPRnopc:$Rm, GPRnopc:$Rn))]>;
let DecoderMethod = "DecodeQADDInstruction" in
-def QADD : AAI<0b00010000, 0b00000101, "qadd",
- [(set GPRnopc:$Rd, (int_arm_qadd GPRnopc:$Rm, GPRnopc:$Rn))],
- (ins GPRnopc:$Rm, GPRnopc:$Rn), "\t$Rd, $Rm, $Rn">;
-
-def QSUB : AAI<0b00010010, 0b00000101, "qsub",
- [(set GPRnopc:$Rd, (int_arm_qsub GPRnopc:$Rm, GPRnopc:$Rn))],
- (ins GPRnopc:$Rm, GPRnopc:$Rn), "\t$Rd, $Rm, $Rn">;
-def QDADD : AAI<0b00010100, 0b00000101, "qdadd", [],
- (ins GPRnopc:$Rm, GPRnopc:$Rn),
- "\t$Rd, $Rm, $Rn">;
-def QDSUB : AAI<0b00010110, 0b00000101, "qdsub", [],
- (ins GPRnopc:$Rm, GPRnopc:$Rn),
- "\t$Rd, $Rm, $Rn">;
-
-def QADD16 : AAI<0b01100010, 0b11110001, "qadd16">;
-def QADD8 : AAI<0b01100010, 0b11111001, "qadd8">;
-def QASX : AAI<0b01100010, 0b11110011, "qasx">;
-def QSAX : AAI<0b01100010, 0b11110101, "qsax">;
-def QSUB16 : AAI<0b01100010, 0b11110111, "qsub16">;
-def QSUB8 : AAI<0b01100010, 0b11111111, "qsub8">;
-def UQADD16 : AAI<0b01100110, 0b11110001, "uqadd16">;
-def UQADD8 : AAI<0b01100110, 0b11111001, "uqadd8">;
-def UQASX : AAI<0b01100110, 0b11110011, "uqasx">;
-def UQSAX : AAI<0b01100110, 0b11110101, "uqsax">;
-def UQSUB16 : AAI<0b01100110, 0b11110111, "uqsub16">;
-def UQSUB8 : AAI<0b01100110, 0b11111111, "uqsub8">;
+ def QADD : AAIRevOpr<0b00010000, 0b00000101, "qadd",
+ [(set GPRnopc:$Rd, (int_arm_qadd GPRnopc:$Rm, GPRnopc:$Rn))]>;
+}
+
+def UQADD16 : AAIIntrinsic<0b01100110, 0b11110001, "uqadd16", int_arm_uqadd16>;
+def UQADD8 : AAIIntrinsic<0b01100110, 0b11111001, "uqadd8", int_arm_uqadd8>;
+def UQSUB16 : AAIIntrinsic<0b01100110, 0b11110111, "uqsub16", int_arm_uqsub16>;
+def UQSUB8 : AAIIntrinsic<0b01100110, 0b11111111, "uqsub8", int_arm_uqsub8>;
+def QASX : AAIIntrinsic<0b01100010, 0b11110011, "qasx", int_arm_qasx>;
+def QSAX : AAIIntrinsic<0b01100010, 0b11110101, "qsax", int_arm_qsax>;
+def UQASX : AAIIntrinsic<0b01100110, 0b11110011, "uqasx", int_arm_uqasx>;
+def UQSAX : AAIIntrinsic<0b01100110, 0b11110101, "uqsax", int_arm_uqsax>;
// Signed/Unsigned add/subtract
-def SASX : AAI<0b01100001, 0b11110011, "sasx">;
-def SADD16 : AAI<0b01100001, 0b11110001, "sadd16">;
-def SADD8 : AAI<0b01100001, 0b11111001, "sadd8">;
-def SSAX : AAI<0b01100001, 0b11110101, "ssax">;
-def SSUB16 : AAI<0b01100001, 0b11110111, "ssub16">;
-def SSUB8 : AAI<0b01100001, 0b11111111, "ssub8">;
-def UASX : AAI<0b01100101, 0b11110011, "uasx">;
-def UADD16 : AAI<0b01100101, 0b11110001, "uadd16">;
-def UADD8 : AAI<0b01100101, 0b11111001, "uadd8">;
-def USAX : AAI<0b01100101, 0b11110101, "usax">;
-def USUB16 : AAI<0b01100101, 0b11110111, "usub16">;
-def USUB8 : AAI<0b01100101, 0b11111111, "usub8">;
+def SASX : AAIIntrinsic<0b01100001, 0b11110011, "sasx", int_arm_sasx>;
+def SADD16 : AAIIntrinsic<0b01100001, 0b11110001, "sadd16", int_arm_sadd16>;
+def SADD8 : AAIIntrinsic<0b01100001, 0b11111001, "sadd8", int_arm_sadd8>;
+def SSAX : AAIIntrinsic<0b01100001, 0b11110101, "ssax", int_arm_ssax>;
+def SSUB16 : AAIIntrinsic<0b01100001, 0b11110111, "ssub16", int_arm_ssub16>;
+def SSUB8 : AAIIntrinsic<0b01100001, 0b11111111, "ssub8", int_arm_ssub8>;
+def UASX : AAIIntrinsic<0b01100101, 0b11110011, "uasx", int_arm_uasx>;
+def UADD16 : AAIIntrinsic<0b01100101, 0b11110001, "uadd16", int_arm_uadd16>;
+def UADD8 : AAIIntrinsic<0b01100101, 0b11111001, "uadd8", int_arm_uadd8>;
+def USAX : AAIIntrinsic<0b01100101, 0b11110101, "usax", int_arm_usax>;
+def USUB16 : AAIIntrinsic<0b01100101, 0b11110111, "usub16", int_arm_usub16>;
+def USUB8 : AAIIntrinsic<0b01100101, 0b11111111, "usub8", int_arm_usub8>;
// Signed/Unsigned halving add/subtract
-def SHASX : AAI<0b01100011, 0b11110011, "shasx">;
-def SHADD16 : AAI<0b01100011, 0b11110001, "shadd16">;
-def SHADD8 : AAI<0b01100011, 0b11111001, "shadd8">;
-def SHSAX : AAI<0b01100011, 0b11110101, "shsax">;
-def SHSUB16 : AAI<0b01100011, 0b11110111, "shsub16">;
-def SHSUB8 : AAI<0b01100011, 0b11111111, "shsub8">;
-def UHASX : AAI<0b01100111, 0b11110011, "uhasx">;
-def UHADD16 : AAI<0b01100111, 0b11110001, "uhadd16">;
-def UHADD8 : AAI<0b01100111, 0b11111001, "uhadd8">;
-def UHSAX : AAI<0b01100111, 0b11110101, "uhsax">;
-def UHSUB16 : AAI<0b01100111, 0b11110111, "uhsub16">;
-def UHSUB8 : AAI<0b01100111, 0b11111111, "uhsub8">;
+def SHASX : AAIIntrinsic<0b01100011, 0b11110011, "shasx", int_arm_shasx>;
+def SHADD16 : AAIIntrinsic<0b01100011, 0b11110001, "shadd16", int_arm_shadd16>;
+def SHADD8 : AAIIntrinsic<0b01100011, 0b11111001, "shadd8", int_arm_shadd8>;
+def SHSAX : AAIIntrinsic<0b01100011, 0b11110101, "shsax", int_arm_shsax>;
+def SHSUB16 : AAIIntrinsic<0b01100011, 0b11110111, "shsub16", int_arm_shsub16>;
+def SHSUB8 : AAIIntrinsic<0b01100011, 0b11111111, "shsub8", int_arm_shsub8>;
+def UHASX : AAIIntrinsic<0b01100111, 0b11110011, "uhasx", int_arm_uhasx>;
+def UHADD16 : AAIIntrinsic<0b01100111, 0b11110001, "uhadd16", int_arm_uhadd16>;
+def UHADD8 : AAIIntrinsic<0b01100111, 0b11111001, "uhadd8", int_arm_uhadd8>;
+def UHSAX : AAIIntrinsic<0b01100111, 0b11110101, "uhsax", int_arm_uhsax>;
+def UHSUB16 : AAIIntrinsic<0b01100111, 0b11110111, "uhsub16", int_arm_uhsub16>;
+def UHSUB8 : AAIIntrinsic<0b01100111, 0b11111111, "uhsub8", int_arm_uhsub8>;
// Unsigned Sum of Absolute Differences [and Accumulate].
def USAD8 : AI<(outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm),
MulFrm /* for convenience */, NoItinerary, "usad8",
- "\t$Rd, $Rn, $Rm", []>,
+ "\t$Rd, $Rn, $Rm",
+ [(set GPR:$Rd, (int_arm_usad8 GPR:$Rn, GPR:$Rm))]>,
Requires<[IsARM, HasV6]>, Sched<[WriteALU, ReadALU, ReadALU]> {
bits<4> Rd;
bits<4> Rn;
@@ -3711,7 +3742,8 @@ def USAD8 : AI<(outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm),
}
def USADA8 : AI<(outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm, GPR:$Ra),
MulFrm /* for convenience */, NoItinerary, "usada8",
- "\t$Rd, $Rn, $Rm, $Ra", []>,
+ "\t$Rd, $Rn, $Rm, $Ra",
+ [(set GPR:$Rd, (int_arm_usada8 GPR:$Rn, GPR:$Rm, GPR:$Ra))]>,
Requires<[IsARM, HasV6]>, Sched<[WriteALU, ReadALU, ReadALU]>{
bits<4> Rd;
bits<4> Rn;
@@ -3726,7 +3758,6 @@ def USADA8 : AI<(outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm, GPR:$Ra),
}
// Signed/Unsigned saturate
-
def SSAT : AI<(outs GPRnopc:$Rd),
(ins imm1_32:$sat_imm, GPRnopc:$Rn, shift_imm:$sh),
SatFrm, NoItinerary, "ssat", "\t$Rd, $sat_imm, $Rn$sh", []>,
@@ -3795,6 +3826,10 @@ def : ARMV6Pat<(int_arm_usat GPRnopc:$a, imm0_31:$pos),
(USAT imm0_31:$pos, GPRnopc:$a, 0)>;
def : ARMPat<(ARMssatnoshift GPRnopc:$Rn, imm0_31:$imm),
(SSAT imm0_31:$imm, GPRnopc:$Rn, 0)>;
+def : ARMV6Pat<(int_arm_ssat16 GPRnopc:$a, imm1_16:$pos),
+ (SSAT16 imm1_16:$pos, GPRnopc:$a)>;
+def : ARMV6Pat<(int_arm_usat16 GPRnopc:$a, imm0_15:$pos),
+ (USAT16 imm0_15:$pos, GPRnopc:$a)>;
//===----------------------------------------------------------------------===//
// Bitwise Instructions.
@@ -4220,8 +4255,8 @@ multiclass AI_smla<string opc> {
IIC_iMAC16, !strconcat(opc, "wt"), "\t$Rd, $Rn, $Rm, $Ra",
[(set GPRnopc:$Rd,
(add GPR:$Ra, (ARMsmulwt GPRnopc:$Rn, GPRnopc:$Rm)))]>,
- Requires<[IsARM, HasV5TE, UseMulOps]>,
- Sched<[WriteMAC16, ReadMUL, ReadMUL, ReadMAC]>;
+ Requires<[IsARM, HasV5TE, UseMulOps]>,
+ Sched<[WriteMAC16, ReadMUL, ReadMUL, ReadMAC]>;
}
}
@@ -4255,7 +4290,8 @@ def : ARMV5TEPat<(ARMsmlaltt GPR:$Rn, GPR:$Rm, GPR:$RLo, GPR:$RHi),
// Helper class for AI_smld.
class AMulDualIbase<bit long, bit sub, bit swap, dag oops, dag iops,
InstrItinClass itin, string opc, string asm>
- : AI<oops, iops, MulFrm, itin, opc, asm, []>, Requires<[IsARM, HasV6]> {
+ : AI<oops, iops, MulFrm, itin, opc, asm, []>,
+ Requires<[IsARM, HasV6]> {
bits<4> Rn;
bits<4> Rm;
let Inst{27-23} = 0b01110;
@@ -4305,20 +4341,40 @@ multiclass AI_smld<bit sub, string opc> {
Sched<[WriteMAC32, ReadMUL, ReadMUL, ReadMAC]>;
def LD: AMulDualI64<1, sub, 0, (outs GPRnopc:$RdLo, GPRnopc:$RdHi),
- (ins GPRnopc:$Rn, GPRnopc:$Rm), NoItinerary,
+ (ins GPRnopc:$Rn, GPRnopc:$Rm, GPRnopc:$RLo, GPRnopc:$RHi),
+ NoItinerary,
!strconcat(opc, "ld"), "\t$RdLo, $RdHi, $Rn, $Rm">,
+ RegConstraint<"$RLo = $RdLo, $RHi = $RdHi">,
Sched<[WriteMAC64Lo, WriteMAC64Hi, ReadMUL, ReadMUL, ReadMAC, ReadMAC]>;
def LDX : AMulDualI64<1, sub, 1, (outs GPRnopc:$RdLo, GPRnopc:$RdHi),
- (ins GPRnopc:$Rn, GPRnopc:$Rm), NoItinerary,
+ (ins GPRnopc:$Rn, GPRnopc:$Rm, GPRnopc:$RLo, GPRnopc:$RHi),
+ NoItinerary,
!strconcat(opc, "ldx"),"\t$RdLo, $RdHi, $Rn, $Rm">,
+ RegConstraint<"$RLo = $RdLo, $RHi = $RdHi">,
Sched<[WriteMUL64Lo, WriteMUL64Hi, ReadMUL, ReadMUL]>;
-
}
defm SMLA : AI_smld<0, "smla">;
defm SMLS : AI_smld<1, "smls">;
+def : ARMV6Pat<(int_arm_smlad GPRnopc:$Rn, GPRnopc:$Rm, GPR:$Ra),
+ (SMLAD GPRnopc:$Rn, GPRnopc:$Rm, GPRnopc:$Ra)>;
+def : ARMV6Pat<(int_arm_smladx GPRnopc:$Rn, GPRnopc:$Rm, GPR:$Ra),
+ (SMLADX GPRnopc:$Rn, GPRnopc:$Rm, GPRnopc:$Ra)>;
+def : ARMV6Pat<(int_arm_smlsd GPRnopc:$Rn, GPRnopc:$Rm, GPR:$Ra),
+ (SMLSD GPRnopc:$Rn, GPRnopc:$Rm, GPRnopc:$Ra)>;
+def : ARMV6Pat<(int_arm_smlsdx GPRnopc:$Rn, GPRnopc:$Rm, GPR:$Ra),
+ (SMLSDX GPRnopc:$Rn, GPRnopc:$Rm, GPRnopc:$Ra)>;
+def : ARMV6Pat<(ARMSmlald GPRnopc:$Rn, GPRnopc:$Rm, GPRnopc:$RLo, GPRnopc:$RHi),
+ (SMLALD GPRnopc:$Rn, GPRnopc:$Rm, GPRnopc:$RLo, GPRnopc:$RHi)>;
+def : ARMV6Pat<(ARMSmlaldx GPRnopc:$Rn, GPRnopc:$Rm, GPRnopc:$RLo, GPRnopc:$RHi),
+ (SMLALDX GPRnopc:$Rn, GPRnopc:$Rm, GPRnopc:$RLo, GPRnopc:$RHi)>;
+def : ARMV6Pat<(ARMSmlsld GPRnopc:$Rn, GPRnopc:$Rm, GPRnopc:$RLo, GPRnopc:$RHi),
+ (SMLSLD GPRnopc:$Rn, GPRnopc:$Rm, GPRnopc:$RLo, GPRnopc:$RHi)>;
+def : ARMV6Pat<(ARMSmlsldx GPRnopc:$Rn, GPRnopc:$Rm, GPRnopc:$RLo, GPRnopc:$RHi),
+ (SMLSLDX GPRnopc:$Rn, GPRnopc:$Rm, GPRnopc:$RLo, GPRnopc:$RHi)>;
+
multiclass AI_sdml<bit sub, string opc> {
def D:AMulDualI<0, sub, 0, (outs GPRnopc:$Rd), (ins GPRnopc:$Rn, GPRnopc:$Rm),
@@ -4332,6 +4388,15 @@ multiclass AI_sdml<bit sub, string opc> {
defm SMUA : AI_sdml<0, "smua">;
defm SMUS : AI_sdml<1, "smus">;
+def : ARMV6Pat<(int_arm_smuad GPRnopc:$Rn, GPRnopc:$Rm),
+ (SMUAD GPRnopc:$Rn, GPRnopc:$Rm)>;
+def : ARMV6Pat<(int_arm_smuadx GPRnopc:$Rn, GPRnopc:$Rm),
+ (SMUADX GPRnopc:$Rn, GPRnopc:$Rm)>;
+def : ARMV6Pat<(int_arm_smusd GPRnopc:$Rn, GPRnopc:$Rm),
+ (SMUSD GPRnopc:$Rn, GPRnopc:$Rm)>;
+def : ARMV6Pat<(int_arm_smusdx GPRnopc:$Rn, GPRnopc:$Rm),
+ (SMUSDX GPRnopc:$Rn, GPRnopc:$Rm)>;
+
//===----------------------------------------------------------------------===//
// Division Instructions (ARMv7-A with virtualization extension)
//
@@ -5648,6 +5713,32 @@ def : ARMV5MOPat<(add GPR:$acc,
(SMLATB GPR:$a, GPR:$b, GPR:$acc)>,
Sched<[WriteMUL32, ReadMUL, ReadMUL]>;
+def : ARMV5TEPat<(int_arm_smulbb GPR:$a, GPR:$b),
+ (SMULBB GPR:$a, GPR:$b)>;
+def : ARMV5TEPat<(int_arm_smulbt GPR:$a, GPR:$b),
+ (SMULBT GPR:$a, GPR:$b)>;
+def : ARMV5TEPat<(int_arm_smultb GPR:$a, GPR:$b),
+ (SMULTB GPR:$a, GPR:$b)>;
+def : ARMV5TEPat<(int_arm_smultt GPR:$a, GPR:$b),
+ (SMULTT GPR:$a, GPR:$b)>;
+def : ARMV5TEPat<(int_arm_smulwb GPR:$a, GPR:$b),
+ (SMULWB GPR:$a, GPR:$b)>;
+def : ARMV5TEPat<(int_arm_smulwt GPR:$a, GPR:$b),
+ (SMULWT GPR:$a, GPR:$b)>;
+
+def : ARMV5TEPat<(int_arm_smlabb GPR:$a, GPR:$b, GPR:$acc),
+ (SMLABB GPR:$a, GPR:$b, GPR:$acc)>;
+def : ARMV5TEPat<(int_arm_smlabt GPR:$a, GPR:$b, GPR:$acc),
+ (SMLABT GPR:$a, GPR:$b, GPR:$acc)>;
+def : ARMV5TEPat<(int_arm_smlatb GPR:$a, GPR:$b, GPR:$acc),
+ (SMLATB GPR:$a, GPR:$b, GPR:$acc)>;
+def : ARMV5TEPat<(int_arm_smlatt GPR:$a, GPR:$b, GPR:$acc),
+ (SMLATT GPR:$a, GPR:$b, GPR:$acc)>;
+def : ARMV5TEPat<(int_arm_smlawb GPR:$a, GPR:$b, GPR:$acc),
+ (SMLAWB GPR:$a, GPR:$b, GPR:$acc)>;
+def : ARMV5TEPat<(int_arm_smlawt GPR:$a, GPR:$b, GPR:$acc),
+ (SMLAWT GPR:$a, GPR:$b, GPR:$acc)>;
+
// Pre-v7 uses MCR for synchronization barriers.
def : ARMPat<(ARMMemBarrierMCR GPR:$zero), (MCR 15, 0, GPR:$zero, 7, 10, 5)>,
Requires<[IsARM, HasV6]>;
diff --git a/contrib/llvm/lib/Target/ARM/ARMInstrNEON.td b/contrib/llvm/lib/Target/ARM/ARMInstrNEON.td
index 9b08c612e16b..51290e5a5b93 100644
--- a/contrib/llvm/lib/Target/ARM/ARMInstrNEON.td
+++ b/contrib/llvm/lib/Target/ARM/ARMInstrNEON.td
@@ -5558,8 +5558,7 @@ defm VSRI : N2VShInsR_QHSD<1, 1, 0b0100, 1, "vsri">;
// VABS : Vector Absolute Value
defm VABS : N2VInt_QHS<0b11, 0b11, 0b01, 0b00110, 0,
- IIC_VUNAiD, IIC_VUNAiQ, "vabs", "s",
- int_arm_neon_vabs>;
+ IIC_VUNAiD, IIC_VUNAiQ, "vabs", "s", abs>;
def VABSfd : N2VD<0b11, 0b11, 0b10, 0b01, 0b01110, 0,
"vabs", "f32",
v2f32, v2f32, fabs>;
@@ -5575,29 +5574,6 @@ def VABShq : N2VQ<0b11, 0b11, 0b01, 0b01, 0b01110, 0,
v8f16, v8f16, fabs>,
Requires<[HasNEON, HasFullFP16]>;
-def : Pat<(xor (v2i32 (bitconvert (v8i8 (NEONvshrs DPR:$src, (i32 7))))),
- (v2i32 (bitconvert (v8i8 (add DPR:$src,
- (NEONvshrs DPR:$src, (i32 7))))))),
- (VABSv8i8 DPR:$src)>;
-def : Pat<(xor (v2i32 (bitconvert (v4i16 (NEONvshrs DPR:$src, (i32 15))))),
- (v2i32 (bitconvert (v4i16 (add DPR:$src,
- (NEONvshrs DPR:$src, (i32 15))))))),
- (VABSv4i16 DPR:$src)>;
-def : Pat<(xor (v2i32 (NEONvshrs DPR:$src, (i32 31))),
- (v2i32 (add DPR:$src, (NEONvshrs DPR:$src, (i32 31))))),
- (VABSv2i32 DPR:$src)>;
-def : Pat<(xor (v4i32 (bitconvert (v16i8 (NEONvshrs QPR:$src, (i32 7))))),
- (v4i32 (bitconvert (v16i8 (add QPR:$src,
- (NEONvshrs QPR:$src, (i32 7))))))),
- (VABSv16i8 QPR:$src)>;
-def : Pat<(xor (v4i32 (bitconvert (v8i16 (NEONvshrs QPR:$src, (i32 15))))),
- (v4i32 (bitconvert (v8i16 (add QPR:$src,
- (NEONvshrs QPR:$src, (i32 15))))))),
- (VABSv8i16 QPR:$src)>;
-def : Pat<(xor (v4i32 (NEONvshrs QPR:$src, (i32 31))),
- (v4i32 (add QPR:$src, (NEONvshrs QPR:$src, (i32 31))))),
- (VABSv4i32 QPR:$src)>;
-
// VQABS : Vector Saturating Absolute Value
defm VQABS : N2VInt_QHS<0b11, 0b11, 0b00, 0b01110, 0,
IIC_VQUNAiD, IIC_VQUNAiQ, "vqabs", "s",
diff --git a/contrib/llvm/lib/Target/ARM/ARMInstrThumb2.td b/contrib/llvm/lib/Target/ARM/ARMInstrThumb2.td
index f710ee6a7e77..bf3d820e7b7d 100644
--- a/contrib/llvm/lib/Target/ARM/ARMInstrThumb2.td
+++ b/contrib/llvm/lib/Target/ARM/ARMInstrThumb2.td
@@ -1993,6 +1993,10 @@ def : Thumb2DSPPat<(add rGPR:$Rn,
def : Thumb2DSPPat<(add rGPR:$Rn,
(sext_inreg (rotr rGPR:$Rm, rot_imm:$rot), i16)),
(t2SXTAH rGPR:$Rn, rGPR:$Rm, rot_imm:$rot)>;
+def : Thumb2DSPPat<(int_arm_sxtb16 rGPR:$Rn),
+ (t2SXTB16 rGPR:$Rn, 0)>;
+def : Thumb2DSPPat<(int_arm_sxtab16 rGPR:$Rn, rGPR:$Rm),
+ (t2SXTAB16 rGPR:$Rn, rGPR:$Rm, 0)>;
// A simple right-shift can also be used in most cases (the exception is the
@@ -2026,6 +2030,9 @@ def : Thumb2DSPPat<(and (rotr rGPR:$Rm, rot_imm:$rot), 0x0000FFFF),
def : Thumb2DSPPat<(and (rotr rGPR:$Rm, rot_imm:$rot), 0x00FF00FF),
(t2UXTB16 rGPR:$Rm, rot_imm:$rot)>;
+def : Thumb2DSPPat<(int_arm_uxtb16 rGPR:$Rm),
+ (t2UXTB16 rGPR:$Rm, 0)>;
+
// FIXME: This pattern incorrectly assumes the shl operator is a rotate.
// The transformation should probably be done as a combiner action
// instead so we can include a check for masking back in the upper
@@ -2053,6 +2060,8 @@ def : Thumb2DSPPat<(add rGPR:$Rn, (and (srl rGPR:$Rm, rot_imm:$rot),
def : Thumb2DSPPat<(add rGPR:$Rn, (and (srl rGPR:$Rm, imm8_or_16:$rot),
0xFFFF)),
(t2UXTAH rGPR:$Rn, rGPR:$Rm, rot_imm:$rot)>;
+def : Thumb2DSPPat<(int_arm_uxtab16 rGPR:$Rn, rGPR:$Rm),
+ (t2UXTAB16 rGPR:$Rn, rGPR:$Rm, 0)>;
}
@@ -2137,10 +2146,9 @@ def : T2Pat<(ARMadde rGPR:$src, t2_so_imm_not:$imm, CPSR),
def : T2Pat<(ARMadde rGPR:$src, imm0_65535_neg:$imm, CPSR),
(t2SBCrr rGPR:$src, (t2MOVi16 (imm_not_XFORM imm:$imm)))>;
-// Select Bytes -- for disassembly only
-
def t2SEL : T2ThreeReg<(outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm),
- NoItinerary, "sel", "\t$Rd, $Rn, $Rm", []>,
+ NoItinerary, "sel", "\t$Rd, $Rn, $Rm",
+ [(set GPR:$Rd, (int_arm_sel GPR:$Rn, GPR:$Rm))]>,
Requires<[IsThumb2, HasDSP]> {
let Inst{31-27} = 0b11111;
let Inst{26-24} = 0b010;
@@ -2154,9 +2162,7 @@ def t2SEL : T2ThreeReg<(outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm),
// A6.3.13, A6.3.14, A6.3.15 Parallel addition and subtraction (signed/unsigned)
// And Miscellaneous operations -- for disassembly only
class T2I_pam<bits<3> op22_20, bits<4> op7_4, string opc,
- list<dag> pat = [/* For disassembly only; pattern left blank */],
- dag iops = (ins rGPR:$Rn, rGPR:$Rm),
- string asm = "\t$Rd, $Rn, $Rm">
+ list<dag> pat, dag iops, string asm>
: T2I<(outs rGPR:$Rd), iops, NoItinerary, opc, asm, pat>,
Requires<[IsThumb2, HasDSP]> {
let Inst{31-27} = 0b11111;
@@ -2174,60 +2180,72 @@ class T2I_pam<bits<3> op22_20, bits<4> op7_4, string opc,
let Inst{3-0} = Rm;
}
-// Saturating add/subtract -- for disassembly only
-
-def t2QADD : T2I_pam<0b000, 0b1000, "qadd",
- [(set rGPR:$Rd, (int_arm_qadd rGPR:$Rn, rGPR:$Rm))],
- (ins rGPR:$Rm, rGPR:$Rn), "\t$Rd, $Rm, $Rn">;
-def t2QADD16 : T2I_pam<0b001, 0b0001, "qadd16">;
-def t2QADD8 : T2I_pam<0b000, 0b0001, "qadd8">;
-def t2QASX : T2I_pam<0b010, 0b0001, "qasx">;
-def t2QDADD : T2I_pam<0b000, 0b1001, "qdadd", [],
- (ins rGPR:$Rm, rGPR:$Rn), "\t$Rd, $Rm, $Rn">;
-def t2QDSUB : T2I_pam<0b000, 0b1011, "qdsub", [],
- (ins rGPR:$Rm, rGPR:$Rn), "\t$Rd, $Rm, $Rn">;
-def t2QSAX : T2I_pam<0b110, 0b0001, "qsax">;
-def t2QSUB : T2I_pam<0b000, 0b1010, "qsub",
- [(set rGPR:$Rd, (int_arm_qsub rGPR:$Rn, rGPR:$Rm))],
- (ins rGPR:$Rm, rGPR:$Rn), "\t$Rd, $Rm, $Rn">;
-def t2QSUB16 : T2I_pam<0b101, 0b0001, "qsub16">;
-def t2QSUB8 : T2I_pam<0b100, 0b0001, "qsub8">;
-def t2UQADD16 : T2I_pam<0b001, 0b0101, "uqadd16">;
-def t2UQADD8 : T2I_pam<0b000, 0b0101, "uqadd8">;
-def t2UQASX : T2I_pam<0b010, 0b0101, "uqasx">;
-def t2UQSAX : T2I_pam<0b110, 0b0101, "uqsax">;
-def t2UQSUB16 : T2I_pam<0b101, 0b0101, "uqsub16">;
-def t2UQSUB8 : T2I_pam<0b100, 0b0101, "uqsub8">;
-
-// Signed/Unsigned add/subtract -- for disassembly only
-
-def t2SASX : T2I_pam<0b010, 0b0000, "sasx">;
-def t2SADD16 : T2I_pam<0b001, 0b0000, "sadd16">;
-def t2SADD8 : T2I_pam<0b000, 0b0000, "sadd8">;
-def t2SSAX : T2I_pam<0b110, 0b0000, "ssax">;
-def t2SSUB16 : T2I_pam<0b101, 0b0000, "ssub16">;
-def t2SSUB8 : T2I_pam<0b100, 0b0000, "ssub8">;
-def t2UASX : T2I_pam<0b010, 0b0100, "uasx">;
-def t2UADD16 : T2I_pam<0b001, 0b0100, "uadd16">;
-def t2UADD8 : T2I_pam<0b000, 0b0100, "uadd8">;
-def t2USAX : T2I_pam<0b110, 0b0100, "usax">;
-def t2USUB16 : T2I_pam<0b101, 0b0100, "usub16">;
-def t2USUB8 : T2I_pam<0b100, 0b0100, "usub8">;
-
-// Signed/Unsigned halving add/subtract -- for disassembly only
-
-def t2SHASX : T2I_pam<0b010, 0b0010, "shasx">;
-def t2SHADD16 : T2I_pam<0b001, 0b0010, "shadd16">;
-def t2SHADD8 : T2I_pam<0b000, 0b0010, "shadd8">;
-def t2SHSAX : T2I_pam<0b110, 0b0010, "shsax">;
-def t2SHSUB16 : T2I_pam<0b101, 0b0010, "shsub16">;
-def t2SHSUB8 : T2I_pam<0b100, 0b0010, "shsub8">;
-def t2UHASX : T2I_pam<0b010, 0b0110, "uhasx">;
-def t2UHADD16 : T2I_pam<0b001, 0b0110, "uhadd16">;
-def t2UHADD8 : T2I_pam<0b000, 0b0110, "uhadd8">;
-def t2UHSAX : T2I_pam<0b110, 0b0110, "uhsax">;
-def t2UHSUB16 : T2I_pam<0b101, 0b0110, "uhsub16">;
-def t2UHSUB8 : T2I_pam<0b100, 0b0110, "uhsub8">;
+class T2I_pam_intrinsics<bits<3> op22_20, bits<4> op7_4, string opc,
+ Intrinsic intrinsic>
+ : T2I_pam<op22_20, op7_4, opc,
+ [(set rGPR:$Rd, (intrinsic rGPR:$Rn, rGPR:$Rm))],
+ (ins rGPR:$Rn, rGPR:$Rm), "\t$Rd, $Rn, $Rm">;
+
+class T2I_pam_intrinsics_rev<bits<3> op22_20, bits<4> op7_4, string opc>
+ : T2I_pam<op22_20, op7_4, opc, [],
+ (ins rGPR:$Rm, rGPR:$Rn), "\t$Rd, $Rm, $Rn">;
+
+// Saturating add/subtract
+def t2QADD16 : T2I_pam_intrinsics<0b001, 0b0001, "qadd16", int_arm_qadd16>;
+def t2QADD8 : T2I_pam_intrinsics<0b000, 0b0001, "qadd8", int_arm_qadd8>;
+def t2QASX : T2I_pam_intrinsics<0b010, 0b0001, "qasx", int_arm_qasx>;
+def t2UQSUB8 : T2I_pam_intrinsics<0b100, 0b0101, "uqsub8", int_arm_uqsub8>;
+def t2QSAX : T2I_pam_intrinsics<0b110, 0b0001, "qsax", int_arm_qsax>;
+def t2QSUB16 : T2I_pam_intrinsics<0b101, 0b0001, "qsub16", int_arm_qsub16>;
+def t2QSUB8 : T2I_pam_intrinsics<0b100, 0b0001, "qsub8", int_arm_qsub8>;
+def t2UQADD16 : T2I_pam_intrinsics<0b001, 0b0101, "uqadd16", int_arm_uqadd16>;
+def t2UQADD8 : T2I_pam_intrinsics<0b000, 0b0101, "uqadd8", int_arm_uqadd8>;
+def t2UQASX : T2I_pam_intrinsics<0b010, 0b0101, "uqasx", int_arm_uqasx>;
+def t2UQSAX : T2I_pam_intrinsics<0b110, 0b0101, "uqsax", int_arm_uqsax>;
+def t2UQSUB16 : T2I_pam_intrinsics<0b101, 0b0101, "uqsub16", int_arm_uqsub16>;
+def t2QADD : T2I_pam_intrinsics_rev<0b000, 0b1000, "qadd">;
+def t2QSUB : T2I_pam_intrinsics_rev<0b000, 0b1010, "qsub">;
+def t2QDADD : T2I_pam_intrinsics_rev<0b000, 0b1001, "qdadd">;
+def t2QDSUB : T2I_pam_intrinsics_rev<0b000, 0b1011, "qdsub">;
+
+def : Thumb2DSPPat<(int_arm_qadd rGPR:$Rm, rGPR:$Rn),
+ (t2QADD rGPR:$Rm, rGPR:$Rn)>;
+def : Thumb2DSPPat<(int_arm_qsub rGPR:$Rm, rGPR:$Rn),
+ (t2QSUB rGPR:$Rm, rGPR:$Rn)>;
+def : Thumb2DSPPat<(int_arm_qadd(int_arm_qadd rGPR:$Rm, rGPR:$Rm), rGPR:$Rn),
+ (t2QDADD rGPR:$Rm, rGPR:$Rn)>;
+def : Thumb2DSPPat<(int_arm_qsub rGPR:$Rm, (int_arm_qadd rGPR:$Rn, rGPR:$Rn)),
+ (t2QDSUB rGPR:$Rm, rGPR:$Rn)>;
+
+// Signed/Unsigned add/subtract
+
+def t2SASX : T2I_pam_intrinsics<0b010, 0b0000, "sasx", int_arm_sasx>;
+def t2SADD16 : T2I_pam_intrinsics<0b001, 0b0000, "sadd16", int_arm_sadd16>;
+def t2SADD8 : T2I_pam_intrinsics<0b000, 0b0000, "sadd8", int_arm_sadd8>;
+def t2SSAX : T2I_pam_intrinsics<0b110, 0b0000, "ssax", int_arm_ssax>;
+def t2SSUB16 : T2I_pam_intrinsics<0b101, 0b0000, "ssub16", int_arm_ssub16>;
+def t2SSUB8 : T2I_pam_intrinsics<0b100, 0b0000, "ssub8", int_arm_ssub8>;
+def t2UASX : T2I_pam_intrinsics<0b010, 0b0100, "uasx", int_arm_uasx>;
+def t2UADD16 : T2I_pam_intrinsics<0b001, 0b0100, "uadd16", int_arm_uadd16>;
+def t2UADD8 : T2I_pam_intrinsics<0b000, 0b0100, "uadd8", int_arm_uadd8>;
+def t2USAX : T2I_pam_intrinsics<0b110, 0b0100, "usax", int_arm_usax>;
+def t2USUB16 : T2I_pam_intrinsics<0b101, 0b0100, "usub16", int_arm_usub16>;
+def t2USUB8 : T2I_pam_intrinsics<0b100, 0b0100, "usub8", int_arm_usub8>;
+
+// Signed/Unsigned halving add/subtract
+
+def t2SHASX : T2I_pam_intrinsics<0b010, 0b0010, "shasx", int_arm_shasx>;
+def t2SHADD16 : T2I_pam_intrinsics<0b001, 0b0010, "shadd16", int_arm_shadd16>;
+def t2SHADD8 : T2I_pam_intrinsics<0b000, 0b0010, "shadd8", int_arm_shadd8>;
+def t2SHSAX : T2I_pam_intrinsics<0b110, 0b0010, "shsax", int_arm_shsax>;
+def t2SHSUB16 : T2I_pam_intrinsics<0b101, 0b0010, "shsub16", int_arm_shsub16>;
+def t2SHSUB8 : T2I_pam_intrinsics<0b100, 0b0010, "shsub8", int_arm_shsub8>;
+def t2UHASX : T2I_pam_intrinsics<0b010, 0b0110, "uhasx", int_arm_uhasx>;
+def t2UHADD16 : T2I_pam_intrinsics<0b001, 0b0110, "uhadd16", int_arm_uhadd16>;
+def t2UHADD8 : T2I_pam_intrinsics<0b000, 0b0110, "uhadd8", int_arm_uhadd8>;
+def t2UHSAX : T2I_pam_intrinsics<0b110, 0b0110, "uhsax", int_arm_uhsax>;
+def t2UHSUB16 : T2I_pam_intrinsics<0b101, 0b0110, "uhsub16", int_arm_uhsub16>;
+def t2UHSUB8 : T2I_pam_intrinsics<0b100, 0b0110, "uhsub8", int_arm_uhsub8>;
// Helper class for disassembly only
// A6.3.16 & A6.3.17
@@ -2255,16 +2273,19 @@ class T2FourReg_mac<bit long, bits<3> op22_20, bits<4> op7_4, dag oops,
// Unsigned Sum of Absolute Differences [and Accumulate].
def t2USAD8 : T2ThreeReg_mac<0, 0b111, 0b0000, (outs rGPR:$Rd),
(ins rGPR:$Rn, rGPR:$Rm),
- NoItinerary, "usad8", "\t$Rd, $Rn, $Rm", []>,
+ NoItinerary, "usad8", "\t$Rd, $Rn, $Rm",
+ [(set rGPR:$Rd, (int_arm_usad8 rGPR:$Rn, rGPR:$Rm))]>,
Requires<[IsThumb2, HasDSP]> {
let Inst{15-12} = 0b1111;
}
def t2USADA8 : T2FourReg_mac<0, 0b111, 0b0000, (outs rGPR:$Rd),
(ins rGPR:$Rn, rGPR:$Rm, rGPR:$Ra), NoItinerary,
- "usada8", "\t$Rd, $Rn, $Rm, $Ra", []>,
+ "usada8", "\t$Rd, $Rn, $Rm, $Ra",
+ [(set rGPR:$Rd, (int_arm_usada8 rGPR:$Rn, rGPR:$Rm, rGPR:$Ra))]>,
Requires<[IsThumb2, HasDSP]>;
// Signed/Unsigned saturate.
+let hasSideEffects = 1 in
class T2SatI<dag iops, string opc, string asm>
: T2I<(outs rGPR:$Rd), iops, NoItinerary, opc, asm, []> {
bits<4> Rd;
@@ -2313,10 +2334,16 @@ def t2USAT16: T2SatI<(ins imm0_15:$sat_imm, rGPR:$Rn),
let Inst{4} = 0;
}
-def : T2Pat<(int_arm_ssat GPR:$a, imm1_32:$pos), (t2SSAT imm1_32:$pos, GPR:$a, 0)>;
-def : T2Pat<(int_arm_usat GPR:$a, imm0_31:$pos), (t2USAT imm0_31:$pos, GPR:$a, 0)>;
def : T2Pat<(ARMssatnoshift GPRnopc:$Rn, imm0_31:$imm),
(t2SSAT imm0_31:$imm, GPRnopc:$Rn, 0)>;
+def : T2Pat<(int_arm_ssat GPR:$a, imm1_32:$pos),
+ (t2SSAT imm1_32:$pos, GPR:$a, 0)>;
+def : T2Pat<(int_arm_usat GPR:$a, imm0_31:$pos),
+ (t2USAT imm0_31:$pos, GPR:$a, 0)>;
+def : T2Pat<(int_arm_ssat16 GPR:$a, imm1_16:$pos),
+ (t2SSAT16 imm1_16:$pos, GPR:$a)>;
+def : T2Pat<(int_arm_usat16 GPR:$a, imm0_15:$pos),
+ (t2USAT16 imm0_15:$pos, GPR:$a)>;
//===----------------------------------------------------------------------===//
// Shift and rotate Instructions.
@@ -2689,6 +2716,18 @@ def : Thumb2DSPPat<(mul sext_16_node:$Rn, (sra rGPR:$Rm, (i32 16))),
(t2SMULBT rGPR:$Rn, rGPR:$Rm)>;
def : Thumb2DSPPat<(mul (sra rGPR:$Rn, (i32 16)), sext_16_node:$Rm),
(t2SMULTB rGPR:$Rn, rGPR:$Rm)>;
+def : Thumb2DSPPat<(int_arm_smulbb rGPR:$Rn, rGPR:$Rm),
+ (t2SMULBB rGPR:$Rn, rGPR:$Rm)>;
+def : Thumb2DSPPat<(int_arm_smulbt rGPR:$Rn, rGPR:$Rm),
+ (t2SMULBT rGPR:$Rn, rGPR:$Rm)>;
+def : Thumb2DSPPat<(int_arm_smultb rGPR:$Rn, rGPR:$Rm),
+ (t2SMULTB rGPR:$Rn, rGPR:$Rm)>;
+def : Thumb2DSPPat<(int_arm_smultt rGPR:$Rn, rGPR:$Rm),
+ (t2SMULTT rGPR:$Rn, rGPR:$Rm)>;
+def : Thumb2DSPPat<(int_arm_smulwb rGPR:$Rn, rGPR:$Rm),
+ (t2SMULWB rGPR:$Rn, rGPR:$Rm)>;
+def : Thumb2DSPPat<(int_arm_smulwt rGPR:$Rn, rGPR:$Rm),
+ (t2SMULWT rGPR:$Rn, rGPR:$Rm)>;
class T2FourRegSMLA<bits<3> op22_20, bits<2> op5_4, string opc,
list<dag> pattern>
@@ -2730,6 +2769,19 @@ def : Thumb2DSPMulPat<(add rGPR:$Ra,
(mul (sra rGPR:$Rn, (i32 16)), sext_16_node:$Rm)),
(t2SMLATB rGPR:$Rn, rGPR:$Rm, rGPR:$Ra)>;
+def : Thumb2DSPPat<(int_arm_smlabb GPR:$a, GPR:$b, GPR:$acc),
+ (t2SMLABB GPR:$a, GPR:$b, GPR:$acc)>;
+def : Thumb2DSPPat<(int_arm_smlabt GPR:$a, GPR:$b, GPR:$acc),
+ (t2SMLABT GPR:$a, GPR:$b, GPR:$acc)>;
+def : Thumb2DSPPat<(int_arm_smlatb GPR:$a, GPR:$b, GPR:$acc),
+ (t2SMLATB GPR:$a, GPR:$b, GPR:$acc)>;
+def : Thumb2DSPPat<(int_arm_smlatt GPR:$a, GPR:$b, GPR:$acc),
+ (t2SMLATT GPR:$a, GPR:$b, GPR:$acc)>;
+def : Thumb2DSPPat<(int_arm_smlawb GPR:$a, GPR:$b, GPR:$acc),
+ (t2SMLAWB GPR:$a, GPR:$b, GPR:$acc)>;
+def : Thumb2DSPPat<(int_arm_smlawt GPR:$a, GPR:$b, GPR:$acc),
+ (t2SMLAWT GPR:$a, GPR:$b, GPR:$acc)>;
+
// Halfword multiple accumulate long: SMLAL<x><y>
def t2SMLALBB : T2MlaLong<0b100, 0b1000, "smlalbb">,
Requires<[IsThumb2, HasDSP]>;
@@ -2749,39 +2801,44 @@ def : Thumb2DSPPat<(ARMsmlaltb GPR:$Rn, GPR:$Rm, GPR:$RLo, GPR:$RHi),
def : Thumb2DSPPat<(ARMsmlaltt GPR:$Rn, GPR:$Rm, GPR:$RLo, GPR:$RHi),
(t2SMLALTT $Rn, $Rm, $RLo, $RHi)>;
-class T2DualHalfMul<bits<3> op22_20, bits<4> op7_4, string opc>
+class T2DualHalfMul<bits<3> op22_20, bits<4> op7_4, string opc,
+ Intrinsic intrinsic>
: T2ThreeReg_mac<0, op22_20, op7_4,
(outs rGPR:$Rd),
(ins rGPR:$Rn, rGPR:$Rm),
- IIC_iMAC32, opc, "\t$Rd, $Rn, $Rm", []>,
+ IIC_iMAC32, opc, "\t$Rd, $Rn, $Rm",
+ [(set rGPR:$Rd, (intrinsic rGPR:$Rn, rGPR:$Rm))]>,
Requires<[IsThumb2, HasDSP]>,
Sched<[WriteMAC32, ReadMUL, ReadMUL, ReadMAC]> {
let Inst{15-12} = 0b1111;
}
// Dual halfword multiple: SMUAD, SMUSD, SMLAD, SMLSD, SMLALD, SMLSLD
-def t2SMUAD: T2DualHalfMul<0b010, 0b0000, "smuad">;
-def t2SMUADX: T2DualHalfMul<0b010, 0b0001, "smuadx">;
-def t2SMUSD: T2DualHalfMul<0b100, 0b0000, "smusd">;
-def t2SMUSDX: T2DualHalfMul<0b100, 0b0001, "smusdx">;
+def t2SMUAD: T2DualHalfMul<0b010, 0b0000, "smuad", int_arm_smuad>;
+def t2SMUADX: T2DualHalfMul<0b010, 0b0001, "smuadx", int_arm_smuadx>;
+def t2SMUSD: T2DualHalfMul<0b100, 0b0000, "smusd", int_arm_smusd>;
+def t2SMUSDX: T2DualHalfMul<0b100, 0b0001, "smusdx", int_arm_smusdx>;
-class T2DualHalfMulAdd<bits<3> op22_20, bits<4> op7_4, string opc>
+class T2DualHalfMulAdd<bits<3> op22_20, bits<4> op7_4, string opc,
+ Intrinsic intrinsic>
: T2FourReg_mac<0, op22_20, op7_4,
(outs rGPR:$Rd),
(ins rGPR:$Rn, rGPR:$Rm, rGPR:$Ra),
- IIC_iMAC32, opc, "\t$Rd, $Rn, $Rm, $Ra", []>,
+ IIC_iMAC32, opc, "\t$Rd, $Rn, $Rm, $Ra",
+ [(set rGPR:$Rd, (intrinsic rGPR:$Rn, rGPR:$Rm, rGPR:$Ra))]>,
Requires<[IsThumb2, HasDSP]>;
-def t2SMLAD : T2DualHalfMulAdd<0b010, 0b0000, "smlad">;
-def t2SMLADX : T2DualHalfMulAdd<0b010, 0b0001, "smladx">;
-def t2SMLSD : T2DualHalfMulAdd<0b100, 0b0000, "smlsd">;
-def t2SMLSDX : T2DualHalfMulAdd<0b100, 0b0001, "smlsdx">;
+def t2SMLAD : T2DualHalfMulAdd<0b010, 0b0000, "smlad", int_arm_smlad>;
+def t2SMLADX : T2DualHalfMulAdd<0b010, 0b0001, "smladx", int_arm_smladx>;
+def t2SMLSD : T2DualHalfMulAdd<0b100, 0b0000, "smlsd", int_arm_smlsd>;
+def t2SMLSDX : T2DualHalfMulAdd<0b100, 0b0001, "smlsdx", int_arm_smlsdx>;
class T2DualHalfMulAddLong<bits<3> op22_20, bits<4> op7_4, string opc>
: T2FourReg_mac<1, op22_20, op7_4,
(outs rGPR:$Ra, rGPR:$Rd),
- (ins rGPR:$Rn, rGPR:$Rm),
+ (ins rGPR:$Rn, rGPR:$Rm, rGPR:$RLo, rGPR:$RHi),
IIC_iMAC64, opc, "\t$Ra, $Rd, $Rn, $Rm", []>,
+ RegConstraint<"$Ra = $RLo, $Rd = $RHi">,
Requires<[IsThumb2, HasDSP]>,
Sched<[WriteMAC64Lo, WriteMAC64Hi, ReadMUL, ReadMUL, ReadMAC, ReadMAC]>;
@@ -2790,6 +2847,15 @@ def t2SMLALDX : T2DualHalfMulAddLong<0b100, 0b1101, "smlaldx">;
def t2SMLSLD : T2DualHalfMulAddLong<0b101, 0b1100, "smlsld">;
def t2SMLSLDX : T2DualHalfMulAddLong<0b101, 0b1101, "smlsldx">;
+def : Thumb2DSPPat<(ARMSmlald rGPR:$Rn, rGPR:$Rm, rGPR:$RLo, rGPR:$RHi),
+ (t2SMLALD rGPR:$Rn, rGPR:$Rm, rGPR:$RLo, rGPR:$RHi)>;
+def : Thumb2DSPPat<(ARMSmlaldx rGPR:$Rn, rGPR:$Rm, rGPR:$RLo, rGPR:$RHi),
+ (t2SMLALDX rGPR:$Rn, rGPR:$Rm, rGPR:$RLo, rGPR:$RHi)>;
+def : Thumb2DSPPat<(ARMSmlsld rGPR:$Rn, rGPR:$Rm, rGPR:$RLo, rGPR:$RHi),
+ (t2SMLSLD rGPR:$Rn, rGPR:$Rm, rGPR:$RLo, rGPR:$RHi)>;
+def : Thumb2DSPPat<(ARMSmlsldx rGPR:$Rn, rGPR:$Rm, rGPR:$RLo, rGPR:$RHi),
+ (t2SMLSLDX rGPR:$Rn, rGPR:$Rm, rGPR:$RLo, rGPR:$RHi)>;
+
//===----------------------------------------------------------------------===//
// Division Instructions.
// Signed and unsigned division on v7-M
@@ -4640,6 +4706,19 @@ def : t2InstSubst<"and${s}${p} $Rd, $Rn, $imm",
def : t2InstSubst<"and${s}${p} $Rdn, $imm",
(t2BICri rGPR:$Rdn, rGPR:$Rdn, t2_so_imm_not:$imm,
pred:$p, cc_out:$s)>;
+// And ORR <--> ORN
+def : t2InstSubst<"orn${s}${p} $Rd, $Rn, $imm",
+ (t2ORRri rGPR:$Rd, rGPR:$Rn, t2_so_imm_not:$imm,
+ pred:$p, cc_out:$s)>;
+def : t2InstSubst<"orn${s}${p} $Rdn, $imm",
+ (t2ORRri rGPR:$Rdn, rGPR:$Rdn, t2_so_imm_not:$imm,
+ pred:$p, cc_out:$s)>;
+def : t2InstSubst<"orr${s}${p} $Rd, $Rn, $imm",
+ (t2ORNri rGPR:$Rd, rGPR:$Rn, t2_so_imm_not:$imm,
+ pred:$p, cc_out:$s)>;
+def : t2InstSubst<"orr${s}${p} $Rdn, $imm",
+ (t2ORNri rGPR:$Rdn, rGPR:$Rdn, t2_so_imm_not:$imm,
+ pred:$p, cc_out:$s)>;
// Likewise, "add Rd, t2_so_imm_neg" -> sub
def : t2InstSubst<"add${s}${p} $Rd, $Rn, $imm",
(t2SUBri GPRnopc:$Rd, GPRnopc:$Rn, t2_so_imm_neg:$imm,
diff --git a/contrib/llvm/lib/Target/ARM/ARMRegisterBankInfo.cpp b/contrib/llvm/lib/Target/ARM/ARMRegisterBankInfo.cpp
index 7325817d446b..13a32211f88c 100644
--- a/contrib/llvm/lib/Target/ARM/ARMRegisterBankInfo.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMRegisterBankInfo.cpp
@@ -196,14 +196,14 @@ const RegisterBank &ARMRegisterBankInfo::getRegBankFromRegClass(
llvm_unreachable("Switch should handle all register classes");
}
-RegisterBankInfo::InstructionMapping
+const RegisterBankInfo::InstructionMapping &
ARMRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
auto Opc = MI.getOpcode();
// Try the default logic for non-generic instructions that are either copies
// or already have some operands assigned to banks.
if (!isPreISelGenericOpcode(Opc)) {
- InstructionMapping Mapping = getInstrMappingImpl(MI);
+ const InstructionMapping &Mapping = getInstrMappingImpl(MI);
if (Mapping.isValid())
return Mapping;
}
@@ -258,7 +258,7 @@ ARMRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
LLT Ty2 = MRI.getType(MI.getOperand(3).getReg());
if (Ty.getSizeInBits() != 64 || Ty1.getSizeInBits() != 32 ||
Ty2.getSizeInBits() != 32)
- return InstructionMapping{};
+ return getInvalidInstructionMapping();
OperandsMapping =
getOperandsMapping({&ARM::ValueMappings[ARM::DPR3OpsIdx],
&ARM::ValueMappings[ARM::GPR3OpsIdx], nullptr,
@@ -271,14 +271,14 @@ ARMRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
LLT Ty1 = MRI.getType(MI.getOperand(1).getReg());
if (Ty.getSizeInBits() != 32 || Ty1.getSizeInBits() != 64 ||
MI.getOperand(2).getImm() % 32 != 0)
- return InstructionMapping{};
+ return getInvalidInstructionMapping();
OperandsMapping = getOperandsMapping({&ARM::ValueMappings[ARM::GPR3OpsIdx],
&ARM::ValueMappings[ARM::DPR3OpsIdx],
nullptr, nullptr});
break;
}
default:
- return InstructionMapping{};
+ return getInvalidInstructionMapping();
}
#ifndef NDEBUG
@@ -292,6 +292,6 @@ ARMRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
}
#endif
- return InstructionMapping{DefaultMappingID, /*Cost=*/1, OperandsMapping,
- NumOperands};
+ return getInstructionMapping(DefaultMappingID, /*Cost=*/1, OperandsMapping,
+ NumOperands);
}
diff --git a/contrib/llvm/lib/Target/ARM/ARMRegisterBankInfo.h b/contrib/llvm/lib/Target/ARM/ARMRegisterBankInfo.h
index 5222c1e6389f..9650b358f319 100644
--- a/contrib/llvm/lib/Target/ARM/ARMRegisterBankInfo.h
+++ b/contrib/llvm/lib/Target/ARM/ARMRegisterBankInfo.h
@@ -36,7 +36,8 @@ public:
const RegisterBank &
getRegBankFromRegClass(const TargetRegisterClass &RC) const override;
- InstructionMapping getInstrMapping(const MachineInstr &MI) const override;
+ const InstructionMapping &
+ getInstrMapping(const MachineInstr &MI) const override;
};
} // End llvm namespace.
#endif
diff --git a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMTargetStreamer.cpp b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMTargetStreamer.cpp
index 2b0cd461df7a..4a943187ab6d 100644
--- a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMTargetStreamer.cpp
+++ b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMTargetStreamer.cpp
@@ -38,6 +38,7 @@ const MCExpr *ARMTargetStreamer::addConstantPoolEntry(const MCExpr *Expr, SMLoc
void ARMTargetStreamer::emitCurrentConstantPool() {
ConstantPools->emitForCurrentSection(Streamer);
+ ConstantPools->clearCacheForCurrentSection(Streamer);
}
// finish() - write out any non-empty assembler constant pools.
diff --git a/contrib/llvm/lib/Target/BPF/MCTargetDesc/BPFAsmBackend.cpp b/contrib/llvm/lib/Target/BPF/MCTargetDesc/BPFAsmBackend.cpp
index 1f355171ebd3..80357a63a4e1 100644
--- a/contrib/llvm/lib/Target/BPF/MCTargetDesc/BPFAsmBackend.cpp
+++ b/contrib/llvm/lib/Target/BPF/MCTargetDesc/BPFAsmBackend.cpp
@@ -70,7 +70,7 @@ void BPFAsmBackend::applyFixup(const MCFixup &Fixup, char *Data,
unsigned Size = Fixup.getKind() == FK_Data_4 ? 4 : 8;
for (unsigned i = 0; i != Size; ++i) {
- unsigned Idx = IsLittleEndian ? i : Size - i;
+ unsigned Idx = IsLittleEndian ? i : Size - i - 1;
Data[Fixup.getOffset() + Idx] = uint8_t(Value >> (i * 8));
}
} else {
diff --git a/contrib/llvm/lib/Target/Hexagon/Disassembler/HexagonDisassembler.cpp b/contrib/llvm/lib/Target/Hexagon/Disassembler/HexagonDisassembler.cpp
index 3396ddbe4fa6..87c212b6163f 100644
--- a/contrib/llvm/lib/Target/Hexagon/Disassembler/HexagonDisassembler.cpp
+++ b/contrib/llvm/lib/Target/Hexagon/Disassembler/HexagonDisassembler.cpp
@@ -553,7 +553,7 @@ static DecodeStatus DecodeCtrRegsRegisterClass(MCInst &Inst, unsigned RegNo,
using namespace Hexagon;
static const MCPhysReg CtrlRegDecoderTable[] = {
/* 0 */ SA0, LC0, SA1, LC1,
- /* 4 */ P3_0, C5, C6, C7,
+ /* 4 */ P3_0, C5, M0, M1,
/* 8 */ USR, PC, UGP, GP,
/* 12 */ CS0, CS1, UPCYCLELO, UPCYCLEHI,
/* 16 */ FRAMELIMIT, FRAMEKEY, PKTCOUNTLO, PKTCOUNTHI,
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonDepIICHVX.td b/contrib/llvm/lib/Target/Hexagon/HexagonDepIICHVX.td
new file mode 100644
index 000000000000..1c1788264c66
--- /dev/null
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonDepIICHVX.td
@@ -0,0 +1,1143 @@
+//===--- HexagonDepIICHVX.td ----------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+def tc_0317c6ca : InstrItinClass;
+def tc_1b93bdc6 : InstrItinClass;
+def tc_2171ebae : InstrItinClass;
+def tc_28978789 : InstrItinClass;
+def tc_316c637c : InstrItinClass;
+def tc_354299ad : InstrItinClass;
+def tc_35e92f8e : InstrItinClass;
+def tc_38208312 : InstrItinClass;
+def tc_4105d6b5 : InstrItinClass;
+def tc_41f4b64e : InstrItinClass;
+def tc_41f99e1c : InstrItinClass;
+def tc_45453b98 : InstrItinClass;
+def tc_4e2a5159 : InstrItinClass;
+def tc_4fd8566e : InstrItinClass;
+def tc_51cd3aab : InstrItinClass;
+def tc_5a9fc4ec : InstrItinClass;
+def tc_5c120602 : InstrItinClass;
+def tc_5cbf490b : InstrItinClass;
+def tc_644584f8 : InstrItinClass;
+def tc_69b6dd20 : InstrItinClass;
+def tc_6b78cf13 : InstrItinClass;
+def tc_6fd9ad30 : InstrItinClass;
+def tc_71337255 : InstrItinClass;
+def tc_72ad7b54 : InstrItinClass;
+def tc_77a4c701 : InstrItinClass;
+def tc_7c3f55c4 : InstrItinClass;
+def tc_7e9f581b : InstrItinClass;
+def tc_7fa82b08 : InstrItinClass;
+def tc_7fa8b40f : InstrItinClass;
+def tc_85d237e3 : InstrItinClass;
+def tc_8b6a873f : InstrItinClass;
+def tc_908a4c8c : InstrItinClass;
+def tc_9311da3f : InstrItinClass;
+def tc_9777e6bf : InstrItinClass;
+def tc_97c165b9 : InstrItinClass;
+def tc_99093773 : InstrItinClass;
+def tc_9b9642a1 : InstrItinClass;
+def tc_9c267309 : InstrItinClass;
+def tc_a3127e12 : InstrItinClass;
+def tc_a4c9df3b : InstrItinClass;
+def tc_aedb9f9e : InstrItinClass;
+def tc_b06ab583 : InstrItinClass;
+def tc_b712833a : InstrItinClass;
+def tc_b77635b4 : InstrItinClass;
+def tc_bbaf280e : InstrItinClass;
+def tc_bf142ae2 : InstrItinClass;
+def tc_c00bf9c9 : InstrItinClass;
+def tc_c4b515c5 : InstrItinClass;
+def tc_cbf6d1dc : InstrItinClass;
+def tc_cedf314b : InstrItinClass;
+def tc_d2cb81ea : InstrItinClass;
+def tc_d5090f3e : InstrItinClass;
+def tc_d642eff3 : InstrItinClass;
+def tc_d725e5b0 : InstrItinClass;
+def tc_d7bea0ec : InstrItinClass;
+def tc_d98f4d63 : InstrItinClass;
+def tc_da979fb3 : InstrItinClass;
+def tc_db5b9e2f : InstrItinClass;
+def tc_e172d86a : InstrItinClass;
+def tc_e231aa4f : InstrItinClass;
+def tc_e3748cdf : InstrItinClass;
+def tc_e5053c8f : InstrItinClass;
+def tc_e6299d16 : InstrItinClass;
+def tc_eb669007 : InstrItinClass;
+def tc_eda67dcd : InstrItinClass;
+def tc_f3fc3f83 : InstrItinClass;
+
+class DepHVXItinV55 {
+ list<InstrItinData> DepHVXItinV55_list = [
+ InstrItinData <tc_0317c6ca, /*SLOT0,STORE,VA*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_ST], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [3, 2, 1, 2, 7],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_1b93bdc6, /*SLOT0,STORE*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_ST]>], [1, 2, 5],
+ [Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_2171ebae, /*SLOT0123,VA_DV*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY01, CVI_XLSHF]>], [9, 2, 7, 7],
+ [HVX_FWD, Hex_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_28978789, /*SLOT0123,4SLOT*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_ALL]>], [3, 2],
+ [HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_316c637c, /*SLOT0123,VA_DV*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY01, CVI_XLSHF]>], [9, 7, 7, 7],
+ [HVX_FWD, HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_354299ad, /*SLOT0,NOSLOT1,STORE,VP*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [SLOT1], 0>,
+ InstrStage<1, [CVI_ST], 0>,
+ InstrStage<1, [CVI_XLANE]>], [1, 2, 5],
+ [Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_35e92f8e, /*SLOT0,NOSLOT1,LOAD,VP*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [SLOT1], 0>,
+ InstrStage<1, [CVI_LD], 0>,
+ InstrStage<1, [CVI_XLANE]>], [9, 1, 2],
+ [HVX_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_38208312, /*SLOT01,LOAD*/
+ [InstrStage<1, [SLOT0, SLOT1], 0>,
+ InstrStage<1, [CVI_LD]>], [9, 3, 2, 1, 2],
+ [HVX_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_4105d6b5, /*SLOT0123,VP*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_XLANE]>], [9, 2],
+ [HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_41f4b64e, /*SLOT0123,VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_SHIFT]>], [9, 5, 2],
+ [HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_41f99e1c, /*SLOT23,VX_DV*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY01]>], [9, 7, 5, 2, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_45453b98, /*SLOT0123,VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_SHIFT]>], [9, 5, 5],
+ [HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_4e2a5159, /*SLOT0123,VP_VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_XLSHF]>], [9, 5, 5, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_4fd8566e, /*SLOT0,NOSLOT1,LOAD,VP*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [SLOT1], 0>,
+ InstrStage<1, [CVI_LD], 0>,
+ InstrStage<1, [CVI_XLANE]>], [9, 3, 1, 2],
+ [HVX_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_51cd3aab, /*SLOT01,LOAD*/
+ [InstrStage<1, [SLOT0, SLOT1], 0>,
+ InstrStage<1, [CVI_LD]>], [9, 2, 1, 2],
+ [HVX_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_5a9fc4ec, /*SLOT0123,VA*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 9, 7, 7, 7],
+ [HVX_FWD, HVX_FWD, HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_5c120602, /*SLOT0123,VP_VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_XLSHF]>], [9, 9, 5, 5, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_5cbf490b, /*SLOT01,LOAD,VA*/
+ [InstrStage<1, [SLOT0, SLOT1], 0>,
+ InstrStage<1, [CVI_LD], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 2, 1, 2],
+ [HVX_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_644584f8, /*SLOT0123,VA_DV*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY01, CVI_XLSHF]>], [9, 7],
+ [HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_69b6dd20, /*SLOT23,VX*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1]>], [9, 5, 2],
+ [HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_6b78cf13, /*SLOT23,VX*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1]>], [9, 2],
+ [HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_6fd9ad30, /*SLOT0,NOSLOT1,STORE,VP*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [SLOT1], 0>,
+ InstrStage<1, [CVI_ST], 0>,
+ InstrStage<1, [CVI_XLANE]>], [3, 2, 1, 2, 5],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_71337255, /*SLOT0123,VA*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 7],
+ [HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_72ad7b54, /*SLOT0123,VP_VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_XLSHF]>], [9, 7, 5],
+ [HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_77a4c701, /*SLOT01,LOAD*/
+ [InstrStage<1, [SLOT0, SLOT1], 0>,
+ InstrStage<1, [CVI_LD]>], [9, 1, 2],
+ [HVX_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7c3f55c4, /*SLOT23,VX_DV*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY01]>], [9, 5, 2],
+ [HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7e9f581b, /*SLOT23,VX_DV*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY01]>], [9, 5, 2, 2],
+ [HVX_FWD, HVX_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7fa82b08, /*SLOT0,NOSLOT1,STORE,VP*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [SLOT1], 0>,
+ InstrStage<1, [CVI_ST], 0>,
+ InstrStage<1, [CVI_XLANE]>], [3, 1, 2, 5],
+ [Hex_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_7fa8b40f, /*SLOT0123,VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_SHIFT]>], [9, 5, 5, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_85d237e3, /*SLOT0,STORE,VA*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_ST], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [2, 1, 2, 7],
+ [Hex_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_8b6a873f, /*SLOT0,STORE*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_ST]>], [3, 2, 1, 2, 5],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_908a4c8c, /*SLOT23,VX*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1]>], [9, 5, 5],
+ [HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_9311da3f, /*SLOT23,VX*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1]>], [9, 7, 7, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_9777e6bf, /*SLOT0,VA*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [4, 7, 1],
+ [Hex_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_97c165b9, /*SLOT0123,VA_DV*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY01, CVI_XLSHF]>], [9, 7, 7],
+ [HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_99093773, /*SLOT0,STORE,VA*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_ST], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [3, 7, 1, 2, 7],
+ [Hex_FWD, HVX_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_9b9642a1, /*SLOT0123,VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_SHIFT]>], [9, 5, 5],
+ [HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_9c267309, /*SLOT01,LOAD*/
+ [InstrStage<1, [SLOT0, SLOT1], 0>,
+ InstrStage<1, [CVI_LD]>], [9, 3, 1, 2],
+ [HVX_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_a3127e12, /*SLOT0123,VA*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 7, 7, 7],
+ [HVX_FWD, HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_a4c9df3b, /*SLOT0,STORE,VA*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_ST], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [3, 1, 2, 7],
+ [Hex_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_aedb9f9e, /*SLOT0,STORE,VA*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_ST], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [7, 1, 2, 7],
+ [HVX_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_b06ab583, /*SLOT0123,VA*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 2, 7],
+ [HVX_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_b712833a, /*SLOT01,LOAD,VA*/
+ [InstrStage<1, [SLOT0, SLOT1], 0>,
+ InstrStage<1, [CVI_LD], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 1, 2],
+ [HVX_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_b77635b4, /*SLOT0123,4SLOT*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_ALL]>], [2],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_bbaf280e, /*SLOT0123,VA*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 7, 7],
+ [HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_bf142ae2, /*SLOT0123,VP*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_XLANE]>], [9, 5, 2],
+ [HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_c00bf9c9, /*SLOT0123,VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_SHIFT]>], [9, 7, 5, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_c4b515c5, /*SLOT0123,VP*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_XLANE]>], [9, 5, 5, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_cbf6d1dc, /*SLOT0123,VP_VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_XLSHF]>], [9, 7, 5, 5, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_cedf314b, /*SLOT0123,4SLOT*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_ALL]>], [3],
+ [HVX_FWD]>,
+
+ InstrItinData <tc_d2cb81ea, /*SLOT0123,VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_SHIFT]>], [9, 5],
+ [HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_d5090f3e, /*SLOT0,STORE*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_ST]>], [2, 1, 2, 5],
+ [Hex_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_d642eff3, /*SLOT0,NOSLOT1,STORE,VP*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [SLOT1], 0>,
+ InstrStage<1, [CVI_ST], 0>,
+ InstrStage<1, [CVI_XLANE]>], [2, 1, 2, 5],
+ [Hex_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_d725e5b0, /*SLOT23,VX*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1]>], [9, 7, 5, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_d7bea0ec, /*SLOT0123,VP_VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_XLSHF]>], [9, 5],
+ [HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_d98f4d63, /*SLOT23,VX_DV*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY01]>], [9, 7, 5, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_da979fb3, /*SLOT01,LOAD,VA*/
+ [InstrStage<1, [SLOT0, SLOT1], 0>,
+ InstrStage<1, [CVI_LD], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 3, 2, 1, 2],
+ [HVX_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_db5b9e2f, /*SLOT0,STORE*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_ST]>], [3, 1, 2, 5],
+ [Hex_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_e172d86a, /*SLOT23,VX_DV*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY01]>], [9, 7, 5, 5],
+ [HVX_FWD, HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_e231aa4f, /*SLOT23,VX*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1]>], [9, 7, 2],
+ [HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_e3748cdf, /*SLOT0,STORE,VA*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_ST], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [1, 2, 7],
+ [Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_e5053c8f, /*SLOT0123,4SLOT*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_ALL]>], [],
+ []>,
+
+ InstrItinData <tc_e6299d16, /*SLOT0123,VP*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_XLANE]>], [9, 5],
+ [HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_eb669007, /*SLOT01,LOAD,VA*/
+ [InstrStage<1, [SLOT0, SLOT1], 0>,
+ InstrStage<1, [CVI_LD], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 3, 1, 2],
+ [HVX_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_eda67dcd, /*SLOT23,VX_DV*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY01]>], [9, 5, 5],
+ [HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_f3fc3f83, /*SLOT0123,VP*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_XLANE]>], [9, 5, 5],
+ [HVX_FWD, HVX_FWD, HVX_FWD]>
+ ];
+}
+
+// Instruction itineraries for the Hexagon HVX (vector coprocessor) on the
+// V60 architecture.  Auto-generated table (the "Dep" prefix marks generated
+// files) -- do not hand-edit individual entries; regenerate instead.
+//
+// Each InstrItinData entry maps an itinerary class (tc_<hash>) to:
+//   * the packet slot(s) it may issue in and the CVI functional unit(s) it
+//     occupies (the InstrStage list; the /*...*/ comment summarizes both),
+//   * the operand read/write cycle list (first bracketed integer list), and
+//   * the forwarding network used by each operand in turn: HVX_FWD for
+//     vector operands, Hex_FWD for scalar operands.
+class DepHVXItinV60 {
+ list<InstrItinData> DepHVXItinV60_list = [
+ InstrItinData <tc_0317c6ca, /*SLOT0,STORE,VA*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_ST], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [3, 2, 1, 2, 7],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_1b93bdc6, /*SLOT0,STORE*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_ST]>], [1, 2, 5],
+ [Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_2171ebae, /*SLOT0123,VA_DV*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY01, CVI_XLSHF]>], [9, 2, 7, 7],
+ [HVX_FWD, Hex_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_28978789, /*SLOT0123,4SLOT*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_ALL]>], [3, 2],
+ [HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_316c637c, /*SLOT0123,VA_DV*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY01, CVI_XLSHF]>], [9, 7, 7, 7],
+ [HVX_FWD, HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_354299ad, /*SLOT0,NOSLOT1,STORE,VP*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [SLOT1], 0>,
+ InstrStage<1, [CVI_ST], 0>,
+ InstrStage<1, [CVI_XLANE]>], [1, 2, 5],
+ [Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_35e92f8e, /*SLOT0,NOSLOT1,LOAD,VP*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [SLOT1], 0>,
+ InstrStage<1, [CVI_LD], 0>,
+ InstrStage<1, [CVI_XLANE]>], [9, 1, 2],
+ [HVX_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_38208312, /*SLOT01,LOAD*/
+ [InstrStage<1, [SLOT0, SLOT1], 0>,
+ InstrStage<1, [CVI_LD]>], [9, 3, 2, 1, 2],
+ [HVX_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_4105d6b5, /*SLOT0123,VP*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_XLANE]>], [9, 2],
+ [HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_41f4b64e, /*SLOT0123,VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_SHIFT]>], [9, 5, 2],
+ [HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_41f99e1c, /*SLOT23,VX_DV*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY01]>], [9, 7, 5, 2, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_45453b98, /*SLOT0123,VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_SHIFT]>], [9, 5, 5],
+ [HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_4e2a5159, /*SLOT0123,VP_VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_XLSHF]>], [9, 5, 5, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_4fd8566e, /*SLOT0,NOSLOT1,LOAD,VP*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [SLOT1], 0>,
+ InstrStage<1, [CVI_LD], 0>,
+ InstrStage<1, [CVI_XLANE]>], [9, 3, 1, 2],
+ [HVX_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_51cd3aab, /*SLOT01,LOAD*/
+ [InstrStage<1, [SLOT0, SLOT1], 0>,
+ InstrStage<1, [CVI_LD]>], [9, 2, 1, 2],
+ [HVX_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_5a9fc4ec, /*SLOT0123,VA*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 9, 7, 7, 7],
+ [HVX_FWD, HVX_FWD, HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_5c120602, /*SLOT0123,VP_VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_XLSHF]>], [9, 9, 5, 5, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_5cbf490b, /*SLOT01,LOAD,VA*/
+ [InstrStage<1, [SLOT0, SLOT1], 0>,
+ InstrStage<1, [CVI_LD], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 2, 1, 2],
+ [HVX_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_644584f8, /*SLOT0123,VA_DV*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY01, CVI_XLSHF]>], [9, 7],
+ [HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_69b6dd20, /*SLOT23,VX*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1]>], [9, 5, 2],
+ [HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_6b78cf13, /*SLOT23,VX*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1]>], [9, 2],
+ [HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_6fd9ad30, /*SLOT0,NOSLOT1,STORE,VP*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [SLOT1], 0>,
+ InstrStage<1, [CVI_ST], 0>,
+ InstrStage<1, [CVI_XLANE]>], [3, 2, 1, 2, 5],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_71337255, /*SLOT0123,VA*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 7],
+ [HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_72ad7b54, /*SLOT0123,VP_VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_XLSHF]>], [9, 7, 5],
+ [HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_77a4c701, /*SLOT01,LOAD*/
+ [InstrStage<1, [SLOT0, SLOT1], 0>,
+ InstrStage<1, [CVI_LD]>], [9, 1, 2],
+ [HVX_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7c3f55c4, /*SLOT23,VX_DV*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY01]>], [9, 5, 2],
+ [HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7e9f581b, /*SLOT23,VX_DV*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY01]>], [9, 5, 2, 2],
+ [HVX_FWD, HVX_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7fa82b08, /*SLOT0,NOSLOT1,STORE,VP*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [SLOT1], 0>,
+ InstrStage<1, [CVI_ST], 0>,
+ InstrStage<1, [CVI_XLANE]>], [3, 1, 2, 5],
+ [Hex_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_7fa8b40f, /*SLOT0123,VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_SHIFT]>], [9, 5, 5, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_85d237e3, /*SLOT0,STORE,VA*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_ST], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [2, 1, 2, 7],
+ [Hex_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_8b6a873f, /*SLOT0,STORE*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_ST]>], [3, 2, 1, 2, 5],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_908a4c8c, /*SLOT23,VX*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1]>], [9, 5, 5],
+ [HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_9311da3f, /*SLOT23,VX*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1]>], [9, 7, 7, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_9777e6bf, /*SLOT0,VA*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [4, 7, 1],
+ [Hex_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_97c165b9, /*SLOT0123,VA_DV*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY01, CVI_XLSHF]>], [9, 7, 7],
+ [HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_99093773, /*SLOT0,STORE,VA*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_ST], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [3, 7, 1, 2, 7],
+ [Hex_FWD, HVX_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_9b9642a1, /*SLOT0123,VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_SHIFT]>], [9, 5, 5],
+ [HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_9c267309, /*SLOT01,LOAD*/
+ [InstrStage<1, [SLOT0, SLOT1], 0>,
+ InstrStage<1, [CVI_LD]>], [9, 3, 1, 2],
+ [HVX_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_a3127e12, /*SLOT0123,VA*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 7, 7, 7],
+ [HVX_FWD, HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_a4c9df3b, /*SLOT0,STORE,VA*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_ST], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [3, 1, 2, 7],
+ [Hex_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_aedb9f9e, /*SLOT0,STORE,VA*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_ST], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [7, 1, 2, 7],
+ [HVX_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_b06ab583, /*SLOT0123,VA*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 2, 7],
+ [HVX_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_b712833a, /*SLOT01,LOAD,VA*/
+ [InstrStage<1, [SLOT0, SLOT1], 0>,
+ InstrStage<1, [CVI_LD], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 1, 2],
+ [HVX_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_b77635b4, /*SLOT0123,4SLOT*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_ALL]>], [2],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_bbaf280e, /*SLOT0123,VA*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 7, 7],
+ [HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_bf142ae2, /*SLOT0123,VP*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_XLANE]>], [9, 5, 2],
+ [HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_c00bf9c9, /*SLOT0123,VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_SHIFT]>], [9, 7, 5, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_c4b515c5, /*SLOT0123,VP*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_XLANE]>], [9, 5, 5, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_cbf6d1dc, /*SLOT0123,VP_VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_XLSHF]>], [9, 7, 5, 5, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_cedf314b, /*SLOT0123,4SLOT*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_ALL]>], [3],
+ [HVX_FWD]>,
+
+ InstrItinData <tc_d2cb81ea, /*SLOT0123,VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_SHIFT]>], [9, 5],
+ [HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_d5090f3e, /*SLOT0,STORE*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_ST]>], [2, 1, 2, 5],
+ [Hex_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_d642eff3, /*SLOT0,NOSLOT1,STORE,VP*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [SLOT1], 0>,
+ InstrStage<1, [CVI_ST], 0>,
+ InstrStage<1, [CVI_XLANE]>], [2, 1, 2, 5],
+ [Hex_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_d725e5b0, /*SLOT23,VX*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1]>], [9, 7, 5, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_d7bea0ec, /*SLOT0123,VP_VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_XLSHF]>], [9, 5],
+ [HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_d98f4d63, /*SLOT23,VX_DV*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY01]>], [9, 7, 5, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_da979fb3, /*SLOT01,LOAD,VA*/
+ [InstrStage<1, [SLOT0, SLOT1], 0>,
+ InstrStage<1, [CVI_LD], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 3, 2, 1, 2],
+ [HVX_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_db5b9e2f, /*SLOT0,STORE*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_ST]>], [3, 1, 2, 5],
+ [Hex_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_e172d86a, /*SLOT23,VX_DV*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY01]>], [9, 7, 5, 5],
+ [HVX_FWD, HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_e231aa4f, /*SLOT23,VX*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1]>], [9, 7, 2],
+ [HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_e3748cdf, /*SLOT0,STORE,VA*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_ST], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [1, 2, 7],
+ [Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_e5053c8f, /*SLOT0123,4SLOT*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_ALL]>], [],
+ []>,
+
+ InstrItinData <tc_e6299d16, /*SLOT0123,VP*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_XLANE]>], [9, 5],
+ [HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_eb669007, /*SLOT01,LOAD,VA*/
+ [InstrStage<1, [SLOT0, SLOT1], 0>,
+ InstrStage<1, [CVI_LD], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 3, 1, 2],
+ [HVX_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_eda67dcd, /*SLOT23,VX_DV*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY01]>], [9, 5, 5],
+ [HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_f3fc3f83, /*SLOT0123,VP*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_XLANE]>], [9, 5, 5],
+ [HVX_FWD, HVX_FWD, HVX_FWD]>
+ ];
+}
+
+// Instruction itineraries for the Hexagon HVX (vector coprocessor) on the
+// V62 architecture.  Auto-generated table (the "Dep" prefix marks generated
+// files) -- do not hand-edit individual entries; regenerate instead.
+//
+// Entry format is the same as the other DepHVXItin* classes in this file:
+// slot/CVI-unit stages, operand cycle list, then per-operand forwarding
+// networks (HVX_FWD = vector operand, Hex_FWD = scalar operand).
+class DepHVXItinV62 {
+ list<InstrItinData> DepHVXItinV62_list = [
+ InstrItinData <tc_0317c6ca, /*SLOT0,STORE,VA*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_ST], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [3, 2, 1, 2, 7],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_1b93bdc6, /*SLOT0,STORE*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_ST]>], [1, 2, 5],
+ [Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_2171ebae, /*SLOT0123,VA_DV*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY01, CVI_XLSHF]>], [9, 2, 7, 7],
+ [HVX_FWD, Hex_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_28978789, /*SLOT0123,4SLOT*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_ALL]>], [3, 2],
+ [HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_316c637c, /*SLOT0123,VA_DV*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY01, CVI_XLSHF]>], [9, 7, 7, 7],
+ [HVX_FWD, HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_354299ad, /*SLOT0,NOSLOT1,STORE,VP*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [SLOT1], 0>,
+ InstrStage<1, [CVI_ST], 0>,
+ InstrStage<1, [CVI_XLANE]>], [1, 2, 5],
+ [Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_35e92f8e, /*SLOT0,NOSLOT1,LOAD,VP*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [SLOT1], 0>,
+ InstrStage<1, [CVI_LD], 0>,
+ InstrStage<1, [CVI_XLANE]>], [9, 1, 2],
+ [HVX_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_38208312, /*SLOT01,LOAD*/
+ [InstrStage<1, [SLOT0, SLOT1], 0>,
+ InstrStage<1, [CVI_LD]>], [9, 3, 2, 1, 2],
+ [HVX_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_4105d6b5, /*SLOT0123,VP*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_XLANE]>], [9, 2],
+ [HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_41f4b64e, /*SLOT0123,VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_SHIFT]>], [9, 5, 2],
+ [HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_41f99e1c, /*SLOT23,VX_DV*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY01]>], [9, 7, 5, 2, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_45453b98, /*SLOT0123,VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_SHIFT]>], [9, 5, 5],
+ [HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_4e2a5159, /*SLOT0123,VP_VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_XLSHF]>], [9, 5, 5, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_4fd8566e, /*SLOT0,NOSLOT1,LOAD,VP*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [SLOT1], 0>,
+ InstrStage<1, [CVI_LD], 0>,
+ InstrStage<1, [CVI_XLANE]>], [9, 3, 1, 2],
+ [HVX_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_51cd3aab, /*SLOT01,LOAD*/
+ [InstrStage<1, [SLOT0, SLOT1], 0>,
+ InstrStage<1, [CVI_LD]>], [9, 2, 1, 2],
+ [HVX_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_5a9fc4ec, /*SLOT0123,VA*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 9, 7, 7, 7],
+ [HVX_FWD, HVX_FWD, HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_5c120602, /*SLOT0123,VP_VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_XLSHF]>], [9, 9, 5, 5, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_5cbf490b, /*SLOT01,LOAD,VA*/
+ [InstrStage<1, [SLOT0, SLOT1], 0>,
+ InstrStage<1, [CVI_LD], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 2, 1, 2],
+ [HVX_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_644584f8, /*SLOT0123,VA_DV*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY01, CVI_XLSHF]>], [9, 7],
+ [HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_69b6dd20, /*SLOT23,VX*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1]>], [9, 5, 2],
+ [HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_6b78cf13, /*SLOT23,VX*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1]>], [9, 2],
+ [HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_6fd9ad30, /*SLOT0,NOSLOT1,STORE,VP*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [SLOT1], 0>,
+ InstrStage<1, [CVI_ST], 0>,
+ InstrStage<1, [CVI_XLANE]>], [3, 2, 1, 2, 5],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_71337255, /*SLOT0123,VA*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 7],
+ [HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_72ad7b54, /*SLOT0123,VP_VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_XLSHF]>], [9, 7, 5],
+ [HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_77a4c701, /*SLOT01,LOAD*/
+ [InstrStage<1, [SLOT0, SLOT1], 0>,
+ InstrStage<1, [CVI_LD]>], [9, 1, 2],
+ [HVX_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7c3f55c4, /*SLOT23,VX_DV*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY01]>], [9, 5, 2],
+ [HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7e9f581b, /*SLOT23,VX_DV*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY01]>], [9, 5, 2, 2],
+ [HVX_FWD, HVX_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7fa82b08, /*SLOT0,NOSLOT1,STORE,VP*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [SLOT1], 0>,
+ InstrStage<1, [CVI_ST], 0>,
+ InstrStage<1, [CVI_XLANE]>], [3, 1, 2, 5],
+ [Hex_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_7fa8b40f, /*SLOT0123,VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_SHIFT]>], [9, 5, 5, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_85d237e3, /*SLOT0,STORE,VA*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_ST], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [2, 1, 2, 7],
+ [Hex_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_8b6a873f, /*SLOT0,STORE*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_ST]>], [3, 2, 1, 2, 5],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_908a4c8c, /*SLOT23,VX*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1]>], [9, 5, 5],
+ [HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_9311da3f, /*SLOT23,VX*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1]>], [9, 7, 7, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_9777e6bf, /*SLOT0,VA*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [4, 7, 1],
+ [Hex_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_97c165b9, /*SLOT0123,VA_DV*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY01, CVI_XLSHF]>], [9, 7, 7],
+ [HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_99093773, /*SLOT0,STORE,VA*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_ST], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [3, 7, 1, 2, 7],
+ [Hex_FWD, HVX_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ // NOTE: on V62 tc_9b9642a1 issues to the general vector-ALU (VA) units
+ // (CVI_MPY0/1, CVI_SHIFT, CVI_XLANE); on V60 the same class is tied to
+ // CVI_SHIFT only (VS).  Intentional per-architecture difference.
+ InstrItinData <tc_9b9642a1, /*SLOT0123,VA*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 7, 7],
+ [HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_9c267309, /*SLOT01,LOAD*/
+ [InstrStage<1, [SLOT0, SLOT1], 0>,
+ InstrStage<1, [CVI_LD]>], [9, 3, 1, 2],
+ [HVX_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_a3127e12, /*SLOT0123,VA*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 7, 7, 7],
+ [HVX_FWD, HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_a4c9df3b, /*SLOT0,STORE,VA*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_ST], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [3, 1, 2, 7],
+ [Hex_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_aedb9f9e, /*SLOT0,STORE,VA*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_ST], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [7, 1, 2, 7],
+ [HVX_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_b06ab583, /*SLOT0123,VA*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 2, 7],
+ [HVX_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_b712833a, /*SLOT01,LOAD,VA*/
+ [InstrStage<1, [SLOT0, SLOT1], 0>,
+ InstrStage<1, [CVI_LD], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 1, 2],
+ [HVX_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_b77635b4, /*SLOT0123,4SLOT*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_ALL]>], [2],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_bbaf280e, /*SLOT0123,VA*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 7, 7],
+ [HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_bf142ae2, /*SLOT0123,VP*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_XLANE]>], [9, 5, 2],
+ [HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_c00bf9c9, /*SLOT0123,VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_SHIFT]>], [9, 7, 5, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_c4b515c5, /*SLOT0123,VP*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_XLANE]>], [9, 5, 5, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_cbf6d1dc, /*SLOT0123,VP_VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_XLSHF]>], [9, 7, 5, 5, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_cedf314b, /*SLOT0123,4SLOT*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_ALL]>], [3],
+ [HVX_FWD]>,
+
+ InstrItinData <tc_d2cb81ea, /*SLOT0123,VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_SHIFT]>], [9, 5],
+ [HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_d5090f3e, /*SLOT0,STORE*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_ST]>], [2, 1, 2, 5],
+ [Hex_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_d642eff3, /*SLOT0,NOSLOT1,STORE,VP*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [SLOT1], 0>,
+ InstrStage<1, [CVI_ST], 0>,
+ InstrStage<1, [CVI_XLANE]>], [2, 1, 2, 5],
+ [Hex_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_d725e5b0, /*SLOT23,VX*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1]>], [9, 7, 5, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_d7bea0ec, /*SLOT0123,VP_VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_XLSHF]>], [9, 5],
+ [HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_d98f4d63, /*SLOT23,VX_DV*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY01]>], [9, 7, 5, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_da979fb3, /*SLOT01,LOAD,VA*/
+ [InstrStage<1, [SLOT0, SLOT1], 0>,
+ InstrStage<1, [CVI_LD], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 3, 2, 1, 2],
+ [HVX_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_db5b9e2f, /*SLOT0,STORE*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_ST]>], [3, 1, 2, 5],
+ [Hex_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_e172d86a, /*SLOT23,VX_DV*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY01]>], [9, 7, 5, 5],
+ [HVX_FWD, HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_e231aa4f, /*SLOT23,VX*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1]>], [9, 7, 2],
+ [HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_e3748cdf, /*SLOT0,STORE,VA*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_ST], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [1, 2, 7],
+ [Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_e5053c8f, /*SLOT0123,4SLOT*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_ALL]>], [],
+ []>,
+
+ InstrItinData <tc_e6299d16, /*SLOT0123,VP*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_XLANE]>], [9, 5],
+ [HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_eb669007, /*SLOT01,LOAD,VA*/
+ [InstrStage<1, [SLOT0, SLOT1], 0>,
+ InstrStage<1, [CVI_LD], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 3, 1, 2],
+ [HVX_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_eda67dcd, /*SLOT23,VX_DV*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY01]>], [9, 5, 5],
+ [HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_f3fc3f83, /*SLOT0123,VP*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_XLANE]>], [9, 5, 5],
+ [HVX_FWD, HVX_FWD, HVX_FWD]>
+ ];
+}
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonDepIICScalar.td b/contrib/llvm/lib/Target/Hexagon/HexagonDepIICScalar.td
new file mode 100644
index 000000000000..261778bda724
--- /dev/null
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonDepIICScalar.td
@@ -0,0 +1,2504 @@
+//===--- HexagonDepIICScalar.td -------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+// Scalar timing-class (itinerary class) declarations for the Hexagon
+// backend.  Each tc_<hash> symbol is an opaque handle naming one timing
+// class; the DepScalarItinV* classes defined later in this file bind each
+// handle to per-subtarget issue-slot constraints (and, on newer cores,
+// operand latencies and forwarding resources).
+// NOTE(review): the "Dep" file-name prefix suggests this file is machine
+// generated -- confirm with the code owner before hand-editing entries.
+def tc_049dfb74 : InstrItinClass;
+def tc_0767081f : InstrItinClass;
+def tc_07ac815d : InstrItinClass;
+def tc_090485bb : InstrItinClass;
+def tc_09c86199 : InstrItinClass;
+def tc_09faec3b : InstrItinClass;
+def tc_0cb867f2 : InstrItinClass;
+def tc_1000eb10 : InstrItinClass;
+def tc_128719e8 : InstrItinClass;
+def tc_136c4786 : InstrItinClass;
+def tc_14da557c : InstrItinClass;
+def tc_1b6011fb : InstrItinClass;
+def tc_1b834fe7 : InstrItinClass;
+def tc_1e062b18 : InstrItinClass;
+def tc_1e69aa99 : InstrItinClass;
+def tc_1f9668cc : InstrItinClass;
+def tc_1fe8323c : InstrItinClass;
+def tc_20a8e109 : InstrItinClass;
+def tc_210b2456 : InstrItinClass;
+def tc_251c87b2 : InstrItinClass;
+def tc_261d9b78 : InstrItinClass;
+def tc_28d296df : InstrItinClass;
+def tc_29c14515 : InstrItinClass;
+def tc_2aaab1e0 : InstrItinClass;
+def tc_2c8fe5ae : InstrItinClass;
+def tc_2d1e6f5c : InstrItinClass;
+def tc_2e55aa16 : InstrItinClass;
+def tc_30665cb0 : InstrItinClass;
+def tc_336e698c : InstrItinClass;
+def tc_34e882a4 : InstrItinClass;
+def tc_35fb9d13 : InstrItinClass;
+def tc_37326008 : InstrItinClass;
+def tc_3993c58b : InstrItinClass;
+def tc_3b4892c6 : InstrItinClass;
+def tc_3bea1824 : InstrItinClass;
+def tc_3c10f809 : InstrItinClass;
+def tc_3d905451 : InstrItinClass;
+def tc_3e61d314 : InstrItinClass;
+def tc_3eab77bd : InstrItinClass;
+def tc_43068634 : InstrItinClass;
+def tc_45631a8d : InstrItinClass;
+def tc_47ab9233 : InstrItinClass;
+def tc_47f0b7ad : InstrItinClass;
+def tc_485bb57c : InstrItinClass;
+def tc_4997da4a : InstrItinClass;
+def tc_511f28f6 : InstrItinClass;
+def tc_537e2013 : InstrItinClass;
+def tc_53ee6546 : InstrItinClass;
+def tc_548f402d : InstrItinClass;
+def tc_5625c6c1 : InstrItinClass;
+def tc_580a779c : InstrItinClass;
+def tc_583510c7 : InstrItinClass;
+def tc_5d806107 : InstrItinClass;
+def tc_5fa2857c : InstrItinClass;
+def tc_5fe9fcd0 : InstrItinClass;
+def tc_6264c5e0 : InstrItinClass;
+def tc_639d93ee : InstrItinClass;
+def tc_63cd9d2d : InstrItinClass;
+def tc_65dc7cc4 : InstrItinClass;
+def tc_69bb508b : InstrItinClass;
+def tc_6c52d277 : InstrItinClass;
+def tc_6c576d46 : InstrItinClass;
+def tc_70cabf66 : InstrItinClass;
+def tc_7639d4b0 : InstrItinClass;
+def tc_7675c0e9 : InstrItinClass;
+def tc_76c4c5ef : InstrItinClass;
+def tc_77781686 : InstrItinClass;
+def tc_78b3c689 : InstrItinClass;
+def tc_7986ba30 : InstrItinClass;
+def tc_7bc567a7 : InstrItinClass;
+def tc_7c2dcd4d : InstrItinClass;
+def tc_7ca2ea10 : InstrItinClass;
+def tc_7d01cbdc : InstrItinClass;
+def tc_7d9a56cd : InstrItinClass;
+def tc_81a23d44 : InstrItinClass;
+def tc_821c4233 : InstrItinClass;
+def tc_82f0f122 : InstrItinClass;
+def tc_84630363 : InstrItinClass;
+def tc_86442910 : InstrItinClass;
+def tc_87601822 : InstrItinClass;
+def tc_88fa2da6 : InstrItinClass;
+def tc_8c8041e6 : InstrItinClass;
+def tc_8cb685d9 : InstrItinClass;
+def tc_8def9c57 : InstrItinClass;
+def tc_8f0a6bad : InstrItinClass;
+def tc_8fab9ac3 : InstrItinClass;
+def tc_92d1833c : InstrItinClass;
+def tc_94e6ffd9 : InstrItinClass;
+def tc_95c54f8b : InstrItinClass;
+def tc_9a13af9d : InstrItinClass;
+def tc_9b73d261 : InstrItinClass;
+def tc_9c18c9a5 : InstrItinClass;
+def tc_9c68db63 : InstrItinClass;
+def tc_9ce7a5ab : InstrItinClass;
+def tc_9da3628f : InstrItinClass;
+def tc_9dafb7d3 : InstrItinClass;
+def tc_9df8b0dc : InstrItinClass;
+def tc_9e86015f : InstrItinClass;
+def tc_9f518242 : InstrItinClass;
+def tc_a12a5971 : InstrItinClass;
+def tc_a1fb80e1 : InstrItinClass;
+def tc_a333d2a9 : InstrItinClass;
+def tc_a4567c39 : InstrItinClass;
+def tc_a87879e8 : InstrItinClass;
+def tc_a9c993d9 : InstrItinClass;
+def tc_aad55963 : InstrItinClass;
+def tc_ab1b5e74 : InstrItinClass;
+def tc_ae0722f7 : InstrItinClass;
+def tc_ae2c2dc2 : InstrItinClass;
+def tc_ae762521 : InstrItinClass;
+def tc_b08b653e : InstrItinClass;
+def tc_b08be45e : InstrItinClass;
+def tc_b0f50e3c : InstrItinClass;
+def tc_b189ad4c : InstrItinClass;
+def tc_b324366f : InstrItinClass;
+def tc_b5bfaa60 : InstrItinClass;
+def tc_b5f5a094 : InstrItinClass;
+def tc_b86c7e8b : InstrItinClass;
+def tc_baccf077 : InstrItinClass;
+def tc_bc5561d8 : InstrItinClass;
+def tc_bcf0e36e : InstrItinClass;
+def tc_bd16579e : InstrItinClass;
+def tc_be995eaf : InstrItinClass;
+def tc_bf6fa601 : InstrItinClass;
+def tc_c0cd91a8 : InstrItinClass;
+def tc_c14739d5 : InstrItinClass;
+def tc_c1dbc916 : InstrItinClass;
+def tc_c58f771a : InstrItinClass;
+def tc_c85212ca : InstrItinClass;
+def tc_c8f9a6f6 : InstrItinClass;
+def tc_ca280e8b : InstrItinClass;
+def tc_cbe45117 : InstrItinClass;
+def tc_cd321066 : InstrItinClass;
+def tc_d108a090 : InstrItinClass;
+def tc_d1b5a4b6 : InstrItinClass;
+def tc_d2609065 : InstrItinClass;
+def tc_d267fa19 : InstrItinClass;
+def tc_d2a33af5 : InstrItinClass;
+def tc_d63b71d1 : InstrItinClass;
+def tc_d6a805a8 : InstrItinClass;
+def tc_d95f4e98 : InstrItinClass;
+def tc_da79106e : InstrItinClass;
+def tc_dbe218dd : InstrItinClass;
+def tc_dcfee7ae : InstrItinClass;
+def tc_e17ce9ad : InstrItinClass;
+def tc_e2480a7f : InstrItinClass;
+def tc_e2c08bb4 : InstrItinClass;
+def tc_e2c31426 : InstrItinClass;
+def tc_e578178f : InstrItinClass;
+def tc_e836c161 : InstrItinClass;
+def tc_e8c7a357 : InstrItinClass;
+def tc_eb07ef6f : InstrItinClass;
+def tc_ecfaae86 : InstrItinClass;
+def tc_ef0ebaaa : InstrItinClass;
+def tc_ef2676fd : InstrItinClass;
+def tc_f027ebe9 : InstrItinClass;
+def tc_f055fbb6 : InstrItinClass;
+def tc_f1240c08 : InstrItinClass;
+def tc_f16d5b17 : InstrItinClass;
+def tc_f1aa2cdb : InstrItinClass;
+def tc_f26aa619 : InstrItinClass;
+def tc_f4608adc : InstrItinClass;
+def tc_faab1248 : InstrItinClass;
+def tc_fcee8723 : InstrItinClass;
+def tc_feb4974b : InstrItinClass;
+
+// Scalar instruction itineraries for the Hexagon V4 subtarget.  Each timing
+// class is mapped to a single one-cycle InstrStage naming the issue slots
+// (SLOT0..SLOT3) that may execute it.  V4 entries carry no operand-latency
+// or bypass/forwarding lists -- they constrain slot assignment only.
+class DepScalarItinV4 {
+ list<InstrItinData> DepScalarItinV4_list = [
+ InstrItinData <tc_049dfb74, [InstrStage<1, [SLOT2]>]>,
+ InstrItinData <tc_0767081f, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_07ac815d, [InstrStage<1, [SLOT2]>]>,
+ InstrItinData <tc_090485bb, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_09c86199, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_09faec3b, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_0cb867f2, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_1000eb10, [InstrStage<1, [SLOT3]>]>,
+ InstrItinData <tc_128719e8, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_136c4786, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_14da557c, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_1b6011fb, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData <tc_1b834fe7, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_1e062b18, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_1e69aa99, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_1f9668cc, [InstrStage<1, [SLOT2]>]>,
+ InstrItinData <tc_1fe8323c, [InstrStage<1, [SLOT3]>]>,
+ InstrItinData <tc_20a8e109, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_210b2456, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_251c87b2, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_261d9b78, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_28d296df, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData <tc_29c14515, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_2aaab1e0, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_2c8fe5ae, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_2d1e6f5c, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_2e55aa16, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_30665cb0, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_336e698c, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_34e882a4, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_35fb9d13, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_37326008, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_3993c58b, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_3b4892c6, [InstrStage<1, [SLOT3]>]>,
+ InstrItinData <tc_3bea1824, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_3c10f809, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_3d905451, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_3e61d314, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_3eab77bd, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_43068634, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_45631a8d, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_47ab9233, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_47f0b7ad, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_485bb57c, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_4997da4a, [InstrStage<1, [SLOT3]>]>,
+ InstrItinData <tc_511f28f6, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData <tc_537e2013, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_53ee6546, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_548f402d, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData <tc_5625c6c1, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_580a779c, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_583510c7, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_5d806107, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_5fa2857c, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_5fe9fcd0, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData <tc_6264c5e0, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_639d93ee, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_63cd9d2d, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_65dc7cc4, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_69bb508b, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_6c52d277, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_6c576d46, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_70cabf66, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_7639d4b0, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_7675c0e9, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_76c4c5ef, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData <tc_77781686, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_78b3c689, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_7986ba30, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_7bc567a7, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_7c2dcd4d, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_7ca2ea10, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_7d01cbdc, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_7d9a56cd, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_81a23d44, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_821c4233, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_82f0f122, [InstrStage<1, [SLOT3]>]>,
+ InstrItinData <tc_84630363, [InstrStage<1, [SLOT2]>]>,
+ InstrItinData <tc_86442910, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_87601822, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_88fa2da6, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_8c8041e6, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_8cb685d9, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_8def9c57, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_8f0a6bad, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_8fab9ac3, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_92d1833c, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_94e6ffd9, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_95c54f8b, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_9a13af9d, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData <tc_9b73d261, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_9c18c9a5, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_9c68db63, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_9ce7a5ab, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_9da3628f, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_9dafb7d3, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_9df8b0dc, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData <tc_9e86015f, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_9f518242, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_a12a5971, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_a1fb80e1, [InstrStage<1, [SLOT2]>]>,
+ InstrItinData <tc_a333d2a9, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_a4567c39, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_a87879e8, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_a9c993d9, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_aad55963, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData <tc_ab1b5e74, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_ae0722f7, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_ae2c2dc2, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_ae762521, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_b08b653e, [InstrStage<1, [SLOT2]>]>,
+ InstrItinData <tc_b08be45e, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData <tc_b0f50e3c, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData <tc_b189ad4c, [InstrStage<1, [SLOT2]>]>,
+ InstrItinData <tc_b324366f, [InstrStage<1, [SLOT3]>]>,
+ InstrItinData <tc_b5bfaa60, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_b5f5a094, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_b86c7e8b, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_baccf077, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_bc5561d8, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_bcf0e36e, [InstrStage<1, [SLOT3]>]>,
+ InstrItinData <tc_bd16579e, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_be995eaf, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_bf6fa601, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_c0cd91a8, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_c14739d5, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_c1dbc916, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_c58f771a, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_c85212ca, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_c8f9a6f6, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_ca280e8b, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_cbe45117, [InstrStage<1, [SLOT2]>]>,
+ InstrItinData <tc_cd321066, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_d108a090, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_d1b5a4b6, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_d2609065, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_d267fa19, [InstrStage<1, [SLOT2]>]>,
+ InstrItinData <tc_d2a33af5, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_d63b71d1, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_d6a805a8, [InstrStage<1, [SLOT3]>]>,
+ InstrItinData <tc_d95f4e98, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_da79106e, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_dbe218dd, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_dcfee7ae, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_e17ce9ad, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_e2480a7f, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_e2c08bb4, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_e2c31426, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData <tc_e578178f, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_e836c161, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_e8c7a357, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_eb07ef6f, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_ecfaae86, [InstrStage<1, [SLOT2]>]>,
+ InstrItinData <tc_ef0ebaaa, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_ef2676fd, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_f027ebe9, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_f055fbb6, [InstrStage<1, [SLOT3]>]>,
+ InstrItinData <tc_f1240c08, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_f16d5b17, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData <tc_f1aa2cdb, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_f26aa619, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_f4608adc, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_faab1248, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_fcee8723, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_feb4974b, [InstrStage<1, [SLOT3]>]> ];
+}
+
+// Scalar instruction itineraries for the Hexagon V5 subtarget.  As with V4,
+// each timing class carries a single one-cycle InstrStage that only
+// constrains which issue slots (SLOT0..SLOT3) may execute it; no operand
+// latencies or bypasses are modeled.  The visible slot assignments match
+// DepScalarItinV4 entry for entry.
+class DepScalarItinV5 {
+ list<InstrItinData> DepScalarItinV5_list = [
+ InstrItinData <tc_049dfb74, [InstrStage<1, [SLOT2]>]>,
+ InstrItinData <tc_0767081f, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_07ac815d, [InstrStage<1, [SLOT2]>]>,
+ InstrItinData <tc_090485bb, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_09c86199, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_09faec3b, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_0cb867f2, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_1000eb10, [InstrStage<1, [SLOT3]>]>,
+ InstrItinData <tc_128719e8, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_136c4786, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_14da557c, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_1b6011fb, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData <tc_1b834fe7, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_1e062b18, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_1e69aa99, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_1f9668cc, [InstrStage<1, [SLOT2]>]>,
+ InstrItinData <tc_1fe8323c, [InstrStage<1, [SLOT3]>]>,
+ InstrItinData <tc_20a8e109, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_210b2456, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_251c87b2, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_261d9b78, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_28d296df, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData <tc_29c14515, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_2aaab1e0, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_2c8fe5ae, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_2d1e6f5c, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_2e55aa16, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_30665cb0, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_336e698c, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_34e882a4, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_35fb9d13, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_37326008, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_3993c58b, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_3b4892c6, [InstrStage<1, [SLOT3]>]>,
+ InstrItinData <tc_3bea1824, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_3c10f809, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_3d905451, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_3e61d314, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_3eab77bd, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_43068634, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_45631a8d, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_47ab9233, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_47f0b7ad, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_485bb57c, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_4997da4a, [InstrStage<1, [SLOT3]>]>,
+ InstrItinData <tc_511f28f6, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData <tc_537e2013, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_53ee6546, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_548f402d, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData <tc_5625c6c1, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_580a779c, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_583510c7, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_5d806107, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_5fa2857c, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_5fe9fcd0, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData <tc_6264c5e0, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_639d93ee, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_63cd9d2d, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_65dc7cc4, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_69bb508b, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_6c52d277, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_6c576d46, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_70cabf66, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_7639d4b0, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_7675c0e9, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_76c4c5ef, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData <tc_77781686, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_78b3c689, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_7986ba30, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_7bc567a7, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_7c2dcd4d, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_7ca2ea10, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_7d01cbdc, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_7d9a56cd, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_81a23d44, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_821c4233, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_82f0f122, [InstrStage<1, [SLOT3]>]>,
+ InstrItinData <tc_84630363, [InstrStage<1, [SLOT2]>]>,
+ InstrItinData <tc_86442910, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_87601822, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_88fa2da6, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_8c8041e6, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_8cb685d9, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_8def9c57, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_8f0a6bad, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_8fab9ac3, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_92d1833c, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_94e6ffd9, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_95c54f8b, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_9a13af9d, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData <tc_9b73d261, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_9c18c9a5, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_9c68db63, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_9ce7a5ab, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_9da3628f, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_9dafb7d3, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_9df8b0dc, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData <tc_9e86015f, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_9f518242, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_a12a5971, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_a1fb80e1, [InstrStage<1, [SLOT2]>]>,
+ InstrItinData <tc_a333d2a9, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_a4567c39, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_a87879e8, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_a9c993d9, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_aad55963, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData <tc_ab1b5e74, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_ae0722f7, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_ae2c2dc2, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_ae762521, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_b08b653e, [InstrStage<1, [SLOT2]>]>,
+ InstrItinData <tc_b08be45e, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData <tc_b0f50e3c, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData <tc_b189ad4c, [InstrStage<1, [SLOT2]>]>,
+ InstrItinData <tc_b324366f, [InstrStage<1, [SLOT3]>]>,
+ InstrItinData <tc_b5bfaa60, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_b5f5a094, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_b86c7e8b, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_baccf077, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_bc5561d8, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_bcf0e36e, [InstrStage<1, [SLOT3]>]>,
+ InstrItinData <tc_bd16579e, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_be995eaf, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_bf6fa601, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_c0cd91a8, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_c14739d5, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_c1dbc916, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_c58f771a, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_c85212ca, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_c8f9a6f6, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_ca280e8b, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_cbe45117, [InstrStage<1, [SLOT2]>]>,
+ InstrItinData <tc_cd321066, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_d108a090, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_d1b5a4b6, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_d2609065, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_d267fa19, [InstrStage<1, [SLOT2]>]>,
+ InstrItinData <tc_d2a33af5, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_d63b71d1, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_d6a805a8, [InstrStage<1, [SLOT3]>]>,
+ InstrItinData <tc_d95f4e98, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_da79106e, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_dbe218dd, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_dcfee7ae, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_e17ce9ad, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_e2480a7f, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_e2c08bb4, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_e2c31426, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData <tc_e578178f, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_e836c161, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_e8c7a357, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_eb07ef6f, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_ecfaae86, [InstrStage<1, [SLOT2]>]>,
+ InstrItinData <tc_ef0ebaaa, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_ef2676fd, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_f027ebe9, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_f055fbb6, [InstrStage<1, [SLOT3]>]>,
+ InstrItinData <tc_f1240c08, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_f16d5b17, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData <tc_f1aa2cdb, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_f26aa619, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_f4608adc, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_faab1248, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_fcee8723, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_feb4974b, [InstrStage<1, [SLOT3]>]> ];
+}
+
+class DepScalarItinV55 {
+ list<InstrItinData> DepScalarItinV55_list = [
+ InstrItinData <tc_049dfb74, /*tc_2early*/
+ [InstrStage<1, [SLOT2]>], [1],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_0767081f, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [2, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_07ac815d, /*tc_2early*/
+ [InstrStage<1, [SLOT2]>], [2, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_090485bb, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_09c86199, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 4, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_09faec3b, /*tc_3stall*/
+ [InstrStage<1, [SLOT0]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_0cb867f2, /*tc_ld*/
+ [InstrStage<1, [SLOT0]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_1000eb10, /*tc_3x*/
+ [InstrStage<1, [SLOT3]>], [2, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_128719e8, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 3, 1, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_136c4786, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_14da557c, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 2, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_1b6011fb, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [3, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_1b834fe7, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [2, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_1e062b18, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_1e69aa99, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [2, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_1f9668cc, /*tc_2early*/
+ [InstrStage<1, [SLOT2]>], [3, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_1fe8323c, /*tc_2*/
+ [InstrStage<1, [SLOT3]>], [4, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_20a8e109, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_210b2456, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [1, 2, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_251c87b2, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3, 1, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_261d9b78, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_28d296df, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [3, 3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_29c14515, /*tc_ld*/
+ [InstrStage<1, [SLOT0]>], [4, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_2aaab1e0, /*tc_3*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 1, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_2c8fe5ae, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [2, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_2d1e6f5c, /*tc_3*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 1, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_2e55aa16, /*tc_3*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 1, 1, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_30665cb0, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [1],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_336e698c, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_34e882a4, /*tc_ld*/
+ [InstrStage<1, [SLOT0]>], [1],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_35fb9d13, /*tc_2early*/
+ [InstrStage<1, [SLOT0]>], [],
+ []>,
+
+ InstrItinData <tc_37326008, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_3993c58b, /*tc_3stall*/
+ [InstrStage<1, [SLOT0]>], [4, 3, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_3b4892c6, /*tc_3x*/
+ [InstrStage<1, [SLOT3]>], [4, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_3bea1824, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_3c10f809, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_3d905451, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [2, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_3e61d314, /*tc_3stall*/
+ [InstrStage<1, [SLOT0]>], [1, 3, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_3eab77bd, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 3, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_43068634, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_45631a8d, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [1, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_47ab9233, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_47f0b7ad, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_485bb57c, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_4997da4a, /*tc_3x*/
+ [InstrStage<1, [SLOT3]>], [1],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_511f28f6, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_537e2013, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_53ee6546, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_548f402d, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_5625c6c1, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 1, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_580a779c, /*tc_3stall*/
+ [InstrStage<1, [SLOT0]>], [3, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_583510c7, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_5d806107, /*tc_3stall*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_5fa2857c, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_5fe9fcd0, /*tc_2early*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [3, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_6264c5e0, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 1, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_639d93ee, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [2],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_63cd9d2d, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_65dc7cc4, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 3, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_69bb508b, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_6c52d277, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [1, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_6c576d46, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [1, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_70cabf66, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7639d4b0, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3, 1, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7675c0e9, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3, 3, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_76c4c5ef, /*tc_2*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_77781686, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [2, 1, 1, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_78b3c689, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7986ba30, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [3, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7bc567a7, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [2, 1, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7c2dcd4d, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_7ca2ea10, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7d01cbdc, /*tc_3stall*/
+ [InstrStage<1, [SLOT0]>], [4, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7d9a56cd, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_81a23d44, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_821c4233, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_82f0f122, /*tc_3x*/
+ [InstrStage<1, [SLOT3]>], [4, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_84630363, /*tc_2early*/
+ [InstrStage<1, [SLOT2]>], [2, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_86442910, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [],
+ []>,
+
+ InstrItinData <tc_87601822, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_88fa2da6, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_8c8041e6, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_8cb685d9, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_8def9c57, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [3, 1, 1, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_8f0a6bad, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_8fab9ac3, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [3, 3, 1, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_92d1833c, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [1, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_94e6ffd9, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_95c54f8b, /*tc_3stall*/
+ [InstrStage<1, [SLOT0]>], [],
+ []>,
+
+ InstrItinData <tc_9a13af9d, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [2],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_9b73d261, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3, 2, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_9c18c9a5, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_9c68db63, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [3, 1, 2, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_9ce7a5ab, /*tc_3stall*/
+ [InstrStage<1, [SLOT0]>], [4, 2, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_9da3628f, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [2, 1, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_9dafb7d3, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 2, 1, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_9df8b0dc, /*tc_2early*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [3, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_9e86015f, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [2, 3],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_9f518242, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_a12a5971, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_a1fb80e1, /*tc_2early*/
+ [InstrStage<1, [SLOT2]>], [2, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_a333d2a9, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [2],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_a4567c39, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [1, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_a87879e8, /*tc_3stall*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 4, 1, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_a9c993d9, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_aad55963, /*tc_2early*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [],
+ []>,
+
+ InstrItinData <tc_ab1b5e74, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_ae0722f7, /*tc_3*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 4, 1, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_ae2c2dc2, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_ae762521, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 3, 2, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_b08b653e, /*tc_2early*/
+ [InstrStage<1, [SLOT2]>], [1],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_b08be45e, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [3, 3, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_b0f50e3c, /*tc_2*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_b189ad4c, /*tc_2early*/
+ [InstrStage<1, [SLOT2]>], [2],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_b324366f, /*tc_2early*/
+ [InstrStage<1, [SLOT3]>], [1, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_b5bfaa60, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [2, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_b5f5a094, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 3, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_b86c7e8b, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_baccf077, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 2, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_bc5561d8, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 1, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_bcf0e36e, /*tc_3x*/
+ [InstrStage<1, [SLOT3]>], [],
+ []>,
+
+ InstrItinData <tc_bd16579e, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_be995eaf, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [1, 1, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_bf6fa601, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_c0cd91a8, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_c14739d5, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [2, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_c1dbc916, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_c58f771a, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_c85212ca, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_c8f9a6f6, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [3, 1, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_ca280e8b, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_cbe45117, /*tc_2early*/
+ [InstrStage<1, [SLOT2]>], [2],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_cd321066, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_d108a090, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_d1b5a4b6, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_d2609065, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_d267fa19, /*tc_2early*/
+ [InstrStage<1, [SLOT2]>], [],
+ []>,
+
+ InstrItinData <tc_d2a33af5, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 3, 2, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_d63b71d1, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_d6a805a8, /*tc_3x*/
+ [InstrStage<1, [SLOT3]>], [2, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_d95f4e98, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_da79106e, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_dbe218dd, /*tc_3stall*/
+ [InstrStage<1, [SLOT0]>], [3, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_dcfee7ae, /*tc_3stall*/
+ [InstrStage<1, [SLOT0]>], [4, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_e17ce9ad, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_e2480a7f, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [3, 2, 1, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_e2c08bb4, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_e2c31426, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [],
+ []>,
+
+ InstrItinData <tc_e578178f, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 3, 3, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_e836c161, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_e8c7a357, /*tc_2early*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [1, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_eb07ef6f, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [1, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_ecfaae86, /*tc_2early*/
+ [InstrStage<1, [SLOT2]>], [1],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_ef0ebaaa, /*tc_ld*/
+ [InstrStage<1, [SLOT0]>], [1, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_ef2676fd, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [],
+ []>,
+
+ InstrItinData <tc_f027ebe9, /*tc_ld*/
+ [InstrStage<1, [SLOT0]>], [2],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_f055fbb6, /*tc_3x*/
+ [InstrStage<1, [SLOT3]>], [2, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_f1240c08, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_f16d5b17, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [3, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_f1aa2cdb, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 4, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_f26aa619, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_f4608adc, /*tc_3stall*/
+ [InstrStage<1, [SLOT0]>], [1, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_faab1248, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_fcee8723, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_feb4974b, /*tc_3x*/
+ [InstrStage<1, [SLOT3]>], [2, 2],
+ [Hex_FWD, Hex_FWD]>
+ ];
+}
+
+class DepScalarItinV60 {
+ list<InstrItinData> DepScalarItinV60_list = [
+ InstrItinData <tc_049dfb74, /*tc_2early*/
+ [InstrStage<1, [SLOT2]>], [1],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_0767081f, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [2, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_07ac815d, /*tc_2early*/
+ [InstrStage<1, [SLOT2]>], [2, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_090485bb, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_09c86199, /*tc_4x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [5, 5, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_09faec3b, /*tc_newvjump*/
+ [InstrStage<1, [SLOT0]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_0cb867f2, /*tc_ld*/
+ [InstrStage<1, [SLOT0]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_1000eb10, /*tc_3x*/
+ [InstrStage<1, [SLOT3]>], [2, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_128719e8, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 3, 1, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_136c4786, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_14da557c, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 2, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_1b6011fb, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [3, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_1b834fe7, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [2, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_1e062b18, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_1e69aa99, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [2, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_1f9668cc, /*tc_2early*/
+ [InstrStage<1, [SLOT2]>], [3, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_1fe8323c, /*tc_2*/
+ [InstrStage<1, [SLOT3]>], [4, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_20a8e109, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_210b2456, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [1, 2, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_251c87b2, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3, 1, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_261d9b78, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_28d296df, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [3, 3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_29c14515, /*tc_ld*/
+ [InstrStage<1, [SLOT0]>], [4, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_2aaab1e0, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 1, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_2c8fe5ae, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [2, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_2d1e6f5c, /*tc_4x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [5, 2, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_2e55aa16, /*tc_4x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [5, 2, 1, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_30665cb0, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [1],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_336e698c, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_34e882a4, /*tc_ld*/
+ [InstrStage<1, [SLOT0]>], [1],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_35fb9d13, /*tc_2early*/
+ [InstrStage<1, [SLOT0]>], [],
+ []>,
+
+ InstrItinData <tc_37326008, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_3993c58b, /*tc_newvjump*/
+ [InstrStage<1, [SLOT0]>], [3, 3, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_3b4892c6, /*tc_3x*/
+ [InstrStage<1, [SLOT3]>], [4, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_3bea1824, /*tc_4x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [5, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_3c10f809, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_3d905451, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [2, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_3e61d314, /*tc_newvjump*/
+ [InstrStage<1, [SLOT0]>], [2, 3, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_3eab77bd, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 3, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_43068634, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_45631a8d, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [1, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_47ab9233, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_47f0b7ad, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_485bb57c, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_4997da4a, /*tc_3x*/
+ [InstrStage<1, [SLOT3]>], [1],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_511f28f6, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_537e2013, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_53ee6546, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_548f402d, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_5625c6c1, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 1, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_580a779c, /*tc_newvjump*/
+ [InstrStage<1, [SLOT0]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_583510c7, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_5d806107, /*tc_3stall*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_5fa2857c, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_5fe9fcd0, /*tc_2early*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [3, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_6264c5e0, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 1, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_639d93ee, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [2],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_63cd9d2d, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_65dc7cc4, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 3, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_69bb508b, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_6c52d277, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [1, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_6c576d46, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [1, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_70cabf66, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7639d4b0, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3, 1, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7675c0e9, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3, 3, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_76c4c5ef, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_77781686, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [2, 1, 1, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_78b3c689, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7986ba30, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [3, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7bc567a7, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [2, 1, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7c2dcd4d, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_7ca2ea10, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7d01cbdc, /*tc_3stall*/
+ [InstrStage<1, [SLOT0]>], [4, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7d9a56cd, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_81a23d44, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_821c4233, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_82f0f122, /*tc_3x*/
+ [InstrStage<1, [SLOT3]>], [4, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_84630363, /*tc_2early*/
+ [InstrStage<1, [SLOT2]>], [2, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_86442910, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [],
+ []>,
+
+ InstrItinData <tc_87601822, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_88fa2da6, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_8c8041e6, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_8cb685d9, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_8def9c57, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [3, 1, 1, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_8f0a6bad, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_8fab9ac3, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [3, 3, 1, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_92d1833c, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [1, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_94e6ffd9, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_95c54f8b, /*tc_newvjump*/
+ [InstrStage<1, [SLOT0]>], [],
+ []>,
+
+ InstrItinData <tc_9a13af9d, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [2],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_9b73d261, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3, 2, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_9c18c9a5, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_9c68db63, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [3, 1, 2, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_9ce7a5ab, /*tc_newvjump*/
+ [InstrStage<1, [SLOT0]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_9da3628f, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [2, 1, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_9dafb7d3, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 2, 1, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_9df8b0dc, /*tc_2early*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [3, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_9e86015f, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [2, 3],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_9f518242, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_a12a5971, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_a1fb80e1, /*tc_2early*/
+ [InstrStage<1, [SLOT2]>], [2, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_a333d2a9, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [2],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_a4567c39, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [1, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_a87879e8, /*tc_3stall*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 4, 1, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_a9c993d9, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_aad55963, /*tc_2early*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [],
+ []>,
+
+ InstrItinData <tc_ab1b5e74, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_ae0722f7, /*tc_3stall*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 4, 1, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_ae2c2dc2, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_ae762521, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 3, 2, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_b08b653e, /*tc_2early*/
+ [InstrStage<1, [SLOT2]>], [1],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_b08be45e, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [3, 3, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_b0f50e3c, /*tc_2*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_b189ad4c, /*tc_3stall*/
+ [InstrStage<1, [SLOT2]>], [2],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_b324366f, /*tc_2early*/
+ [InstrStage<1, [SLOT3]>], [1, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_b5bfaa60, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [2, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_b5f5a094, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 3, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_b86c7e8b, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_baccf077, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 2, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_bc5561d8, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 1, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_bcf0e36e, /*tc_3stall*/
+ [InstrStage<1, [SLOT3]>], [],
+ []>,
+
+ InstrItinData <tc_bd16579e, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_be995eaf, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [1, 1, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_bf6fa601, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_c0cd91a8, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_c14739d5, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [2, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_c1dbc916, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_c58f771a, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_c85212ca, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_c8f9a6f6, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [3, 1, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_ca280e8b, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_cbe45117, /*tc_2early*/
+ [InstrStage<1, [SLOT2]>], [2],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_cd321066, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_d108a090, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_d1b5a4b6, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_d2609065, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_d267fa19, /*tc_2early*/
+ [InstrStage<1, [SLOT2]>], [],
+ []>,
+
+ InstrItinData <tc_d2a33af5, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 3, 2, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_d63b71d1, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_d6a805a8, /*tc_3stall*/
+ [InstrStage<1, [SLOT3]>], [2, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_d95f4e98, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_da79106e, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_dbe218dd, /*tc_newvjump*/
+ [InstrStage<1, [SLOT0]>], [3, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_dcfee7ae, /*tc_newvjump*/
+ [InstrStage<1, [SLOT0]>], [3, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_e17ce9ad, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_e2480a7f, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [3, 2, 1, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_e2c08bb4, /*tc_3stall*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_e2c31426, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [],
+ []>,
+
+ InstrItinData <tc_e578178f, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 3, 3, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_e836c161, /*tc_4x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [5, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_e8c7a357, /*tc_2early*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [1, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_eb07ef6f, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [1, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_ecfaae86, /*tc_2early*/
+ [InstrStage<1, [SLOT2]>], [1],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_ef0ebaaa, /*tc_ld*/
+ [InstrStage<1, [SLOT0]>], [1, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_ef2676fd, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [],
+ []>,
+
+ InstrItinData <tc_f027ebe9, /*tc_ld*/
+ [InstrStage<1, [SLOT0]>], [2],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_f055fbb6, /*tc_3x*/
+ [InstrStage<1, [SLOT3]>], [2, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_f1240c08, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_f16d5b17, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [3, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_f1aa2cdb, /*tc_4x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [5, 5, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_f26aa619, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_f4608adc, /*tc_3stall*/
+ [InstrStage<1, [SLOT0]>], [1, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_faab1248, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_fcee8723, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_feb4974b, /*tc_3stall*/
+ [InstrStage<1, [SLOT3]>], [2, 2],
+ [Hex_FWD, Hex_FWD]>
+ ];
+}
+
+class DepScalarItinV62 {
+ list<InstrItinData> DepScalarItinV62_list = [
+ InstrItinData <tc_049dfb74, /*tc_2early*/
+ [InstrStage<1, [SLOT2]>], [1],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_0767081f, /*tc_3*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [2, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_07ac815d, /*tc_2early*/
+ [InstrStage<1, [SLOT2]>], [2, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_090485bb, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_09c86199, /*tc_4x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [5, 5, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_09faec3b, /*tc_newvjump*/
+ [InstrStage<1, [SLOT0]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_0cb867f2, /*tc_ld*/
+ [InstrStage<1, [SLOT0]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_1000eb10, /*tc_3x*/
+ [InstrStage<1, [SLOT3]>], [2, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_128719e8, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 3, 1, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_136c4786, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_14da557c, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 2, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_1b6011fb, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [3, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_1b834fe7, /*tc_2early*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [2, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_1e062b18, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_1e69aa99, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [2, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_1f9668cc, /*tc_2early*/
+ [InstrStage<1, [SLOT2]>], [3, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_1fe8323c, /*tc_2*/
+ [InstrStage<1, [SLOT3]>], [4, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_20a8e109, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_210b2456, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [1, 2, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_251c87b2, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3, 1, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_261d9b78, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_28d296df, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [3, 3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_29c14515, /*tc_ld*/
+ [InstrStage<1, [SLOT0]>], [4, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_2aaab1e0, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 1, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_2c8fe5ae, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [2, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_2d1e6f5c, /*tc_4x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [5, 2, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_2e55aa16, /*tc_4x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [5, 2, 1, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_30665cb0, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [1],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_336e698c, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_34e882a4, /*tc_ld*/
+ [InstrStage<1, [SLOT0]>], [1],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_35fb9d13, /*tc_2early*/
+ [InstrStage<1, [SLOT0]>], [],
+ []>,
+
+ InstrItinData <tc_37326008, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_3993c58b, /*tc_newvjump*/
+ [InstrStage<1, [SLOT0]>], [3, 3, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_3b4892c6, /*tc_3x*/
+ [InstrStage<1, [SLOT3]>], [4, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_3bea1824, /*tc_4x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [5, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_3c10f809, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_3d905451, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [2, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_3e61d314, /*tc_newvjump*/
+ [InstrStage<1, [SLOT0]>], [2, 3, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_3eab77bd, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 3, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_43068634, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_45631a8d, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [1, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_47ab9233, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_47f0b7ad, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_485bb57c, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_4997da4a, /*tc_3x*/
+ [InstrStage<1, [SLOT3]>], [1],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_511f28f6, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_537e2013, /*tc_2early*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [3, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_53ee6546, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_548f402d, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_5625c6c1, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 1, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_580a779c, /*tc_newvjump*/
+ [InstrStage<1, [SLOT0]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_583510c7, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_5d806107, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_5fa2857c, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_5fe9fcd0, /*tc_2early*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [3, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_6264c5e0, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 1, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_639d93ee, /*tc_3*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [2],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_63cd9d2d, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_65dc7cc4, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 3, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_69bb508b, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_6c52d277, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [1, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_6c576d46, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [1, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_70cabf66, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7639d4b0, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3, 1, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7675c0e9, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3, 3, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_76c4c5ef, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_77781686, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [2, 1, 1, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_78b3c689, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7986ba30, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [3, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7bc567a7, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [2, 1, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7c2dcd4d, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_7ca2ea10, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7d01cbdc, /*tc_3stall*/
+ [InstrStage<1, [SLOT0]>], [4, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7d9a56cd, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_81a23d44, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_821c4233, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_82f0f122, /*tc_3x*/
+ [InstrStage<1, [SLOT3]>], [4, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_84630363, /*tc_3*/
+ [InstrStage<1, [SLOT2]>], [2, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_86442910, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [],
+ []>,
+
+ InstrItinData <tc_87601822, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_88fa2da6, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_8c8041e6, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_8cb685d9, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_8def9c57, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [3, 1, 1, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_8f0a6bad, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_8fab9ac3, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [3, 3, 1, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_92d1833c, /*tc_2early*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [1, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_94e6ffd9, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_95c54f8b, /*tc_newvjump*/
+ [InstrStage<1, [SLOT0]>], [],
+ []>,
+
+ InstrItinData <tc_9a13af9d, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [2],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_9b73d261, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3, 2, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_9c18c9a5, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_9c68db63, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [3, 1, 2, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_9ce7a5ab, /*tc_newvjump*/
+ [InstrStage<1, [SLOT0]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_9da3628f, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [2, 1, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_9dafb7d3, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 2, 1, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_9df8b0dc, /*tc_2early*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [3, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_9e86015f, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [2, 3],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_9f518242, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_a12a5971, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_a1fb80e1, /*tc_2early*/
+ [InstrStage<1, [SLOT2]>], [2, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_a333d2a9, /*tc_2early*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [2],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_a4567c39, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [1, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_a87879e8, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 4, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_a9c993d9, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_aad55963, /*tc_2early*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [],
+ []>,
+
+ InstrItinData <tc_ab1b5e74, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_ae0722f7, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 4, 2, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_ae2c2dc2, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_ae762521, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 3, 2, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_b08b653e, /*tc_2early*/
+ [InstrStage<1, [SLOT2]>], [1],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_b08be45e, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [3, 3, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_b0f50e3c, /*tc_2*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_b189ad4c, /*tc_3stall*/
+ [InstrStage<1, [SLOT2]>], [2],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_b324366f, /*tc_2early*/
+ [InstrStage<1, [SLOT3]>], [1, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_b5bfaa60, /*tc_2early*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [2, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_b5f5a094, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 3, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_b86c7e8b, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_baccf077, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 2, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_bc5561d8, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 1, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_bcf0e36e, /*tc_3stall*/
+ [InstrStage<1, [SLOT3]>], [],
+ []>,
+
+ InstrItinData <tc_bd16579e, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_be995eaf, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [1, 1, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_bf6fa601, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_c0cd91a8, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_c14739d5, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [2, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_c1dbc916, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_c58f771a, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_c85212ca, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_c8f9a6f6, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [3, 1, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_ca280e8b, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_cbe45117, /*tc_2early*/
+ [InstrStage<1, [SLOT2]>], [2],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_cd321066, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_d108a090, /*tc_2early*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_d1b5a4b6, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_d2609065, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_d267fa19, /*tc_2early*/
+ [InstrStage<1, [SLOT2]>], [],
+ []>,
+
+ InstrItinData <tc_d2a33af5, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 3, 2, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_d63b71d1, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_d6a805a8, /*tc_3stall*/
+ [InstrStage<1, [SLOT3]>], [2, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_d95f4e98, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_da79106e, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_dbe218dd, /*tc_newvjump*/
+ [InstrStage<1, [SLOT0]>], [3, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_dcfee7ae, /*tc_newvjump*/
+ [InstrStage<1, [SLOT0]>], [3, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_e17ce9ad, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_e2480a7f, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [3, 2, 1, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_e2c08bb4, /*tc_3stall*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_e2c31426, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [],
+ []>,
+
+ InstrItinData <tc_e578178f, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 3, 3, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_e836c161, /*tc_4x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [5, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_e8c7a357, /*tc_2early*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [1, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_eb07ef6f, /*tc_2early*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [1, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_ecfaae86, /*tc_3*/
+ [InstrStage<1, [SLOT2]>], [1],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_ef0ebaaa, /*tc_ld*/
+ [InstrStage<1, [SLOT0]>], [1, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_ef2676fd, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [],
+ []>,
+
+ InstrItinData <tc_f027ebe9, /*tc_ld*/
+ [InstrStage<1, [SLOT0]>], [2],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_f055fbb6, /*tc_3x*/
+ [InstrStage<1, [SLOT3]>], [2, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_f1240c08, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_f16d5b17, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [3, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_f1aa2cdb, /*tc_4x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [5, 5, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_f26aa619, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_f4608adc, /*tc_3stall*/
+ [InstrStage<1, [SLOT0]>], [1, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_faab1248, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_fcee8723, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_feb4974b, /*tc_3stall*/
+ [InstrStage<1, [SLOT3]>], [2, 2],
+ [Hex_FWD, Hex_FWD]>
+ ];
+}
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonDepITypes.h b/contrib/llvm/lib/Target/Hexagon/HexagonDepITypes.h
index 331edaf5831d..be831b9501ea 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonDepITypes.h
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonDepITypes.h
@@ -15,38 +15,38 @@ enum Type {
TypeALU32_ADDI = 2,
TypeALU64 = 3,
TypeCJ = 4,
- TypeCOPROC_VMEM = 5,
- TypeCR = 7,
+ TypeCR = 6,
TypeCVI_HIST = 10,
TypeCVI_VA = 16,
TypeCVI_VA_DV = 17,
TypeCVI_VINLANESAT = 18,
- TypeCVI_VM_LD = 20,
- TypeCVI_VM_NEW_ST = 21,
- TypeCVI_VM_ST = 22,
- TypeCVI_VM_STU = 23,
- TypeCVI_VM_TMP_LD = 24,
- TypeCVI_VM_VP_LDU = 25,
- TypeCVI_VP = 26,
- TypeCVI_VP_VS = 27,
- TypeCVI_VS = 28,
- TypeCVI_VX = 30,
- TypeCVI_VX_DV = 31,
- TypeDUPLEX = 32,
- TypeENDLOOP = 33,
- TypeEXTENDER = 34,
- TypeJ = 35,
- TypeLD = 36,
- TypeM = 37,
- TypeMAPPING = 38,
- TypeNCJ = 39,
- TypePSEUDO = 40,
- TypeST = 41,
- TypeSUBINSN = 42,
- TypeS_2op = 43,
- TypeS_3op = 44,
- TypeV2LDST = 47,
- TypeV4LDST = 48
+ TypeCVI_VM_LD = 19,
+ TypeCVI_VM_NEW_ST = 20,
+ TypeCVI_VM_ST = 21,
+ TypeCVI_VM_STU = 22,
+ TypeCVI_VM_TMP_LD = 23,
+ TypeCVI_VM_VP_LDU = 24,
+ TypeCVI_VP = 25,
+ TypeCVI_VP_VS = 26,
+ TypeCVI_VS = 27,
+ TypeCVI_VX = 29,
+ TypeCVI_VX_DV = 30,
+ TypeCVI_VX_LATE = 31,
+ TypeDUPLEX = 33,
+ TypeENDLOOP = 34,
+ TypeEXTENDER = 35,
+ TypeJ = 36,
+ TypeLD = 37,
+ TypeM = 38,
+ TypeMAPPING = 39,
+ TypeNCJ = 40,
+ TypePSEUDO = 41,
+ TypeST = 42,
+ TypeSUBINSN = 43,
+ TypeS_2op = 44,
+ TypeS_3op = 45,
+ TypeV2LDST = 48,
+ TypeV4LDST = 49
};
}
}
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonDepITypes.td b/contrib/llvm/lib/Target/Hexagon/HexagonDepITypes.td
index b35f7ba6d2ab..ac1989e4dd82 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonDepITypes.td
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonDepITypes.td
@@ -13,35 +13,35 @@ def TypeALU32_3op : IType<1>;
def TypeALU32_ADDI : IType<2>;
def TypeALU64 : IType<3>;
def TypeCJ : IType<4>;
-def TypeCOPROC_VMEM : IType<5>;
-def TypeCR : IType<7>;
+def TypeCR : IType<6>;
def TypeCVI_HIST : IType<10>;
def TypeCVI_VA : IType<16>;
def TypeCVI_VA_DV : IType<17>;
def TypeCVI_VINLANESAT : IType<18>;
-def TypeCVI_VM_LD : IType<20>;
-def TypeCVI_VM_NEW_ST : IType<21>;
-def TypeCVI_VM_ST : IType<22>;
-def TypeCVI_VM_STU : IType<23>;
-def TypeCVI_VM_TMP_LD : IType<24>;
-def TypeCVI_VM_VP_LDU : IType<25>;
-def TypeCVI_VP : IType<26>;
-def TypeCVI_VP_VS : IType<27>;
-def TypeCVI_VS : IType<28>;
-def TypeCVI_VX : IType<30>;
-def TypeCVI_VX_DV : IType<31>;
-def TypeDUPLEX : IType<32>;
-def TypeENDLOOP : IType<33>;
-def TypeEXTENDER : IType<34>;
-def TypeJ : IType<35>;
-def TypeLD : IType<36>;
-def TypeM : IType<37>;
-def TypeMAPPING : IType<38>;
-def TypeNCJ : IType<39>;
-def TypePSEUDO : IType<40>;
-def TypeST : IType<41>;
-def TypeSUBINSN : IType<42>;
-def TypeS_2op : IType<43>;
-def TypeS_3op : IType<44>;
-def TypeV2LDST : IType<47>;
-def TypeV4LDST : IType<48>;
+def TypeCVI_VM_LD : IType<19>;
+def TypeCVI_VM_NEW_ST : IType<20>;
+def TypeCVI_VM_ST : IType<21>;
+def TypeCVI_VM_STU : IType<22>;
+def TypeCVI_VM_TMP_LD : IType<23>;
+def TypeCVI_VM_VP_LDU : IType<24>;
+def TypeCVI_VP : IType<25>;
+def TypeCVI_VP_VS : IType<26>;
+def TypeCVI_VS : IType<27>;
+def TypeCVI_VX : IType<29>;
+def TypeCVI_VX_DV : IType<30>;
+def TypeCVI_VX_LATE : IType<31>;
+def TypeDUPLEX : IType<33>;
+def TypeENDLOOP : IType<34>;
+def TypeEXTENDER : IType<35>;
+def TypeJ : IType<36>;
+def TypeLD : IType<37>;
+def TypeM : IType<38>;
+def TypeMAPPING : IType<39>;
+def TypeNCJ : IType<40>;
+def TypePSEUDO : IType<41>;
+def TypeST : IType<42>;
+def TypeSUBINSN : IType<43>;
+def TypeS_2op : IType<44>;
+def TypeS_3op : IType<45>;
+def TypeV2LDST : IType<48>;
+def TypeV4LDST : IType<49>;
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonDepInstrFormats.td b/contrib/llvm/lib/Target/Hexagon/HexagonDepInstrFormats.td
index d7a99f48803b..1b24be477158 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonDepInstrFormats.td
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonDepInstrFormats.td
@@ -7,233 +7,140 @@
//
//===----------------------------------------------------------------------===//
-class Enc_12122225 : OpcodeHexagon {
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <5> Vx32;
- let Inst{7-3} = Vx32{4-0};
- bits <3> Qd8;
- let Inst{2-0} = Qd8{2-0};
-}
-class Enc_16626097 : OpcodeHexagon {
- bits <2> Qs4;
- let Inst{6-5} = Qs4{1-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
- bits <5> Vv32;
- let Inst{12-8} = Vv32{4-0};
- bits <5> Vw32;
- let Inst{4-0} = Vw32{4-0};
-}
-class Enc_13397056 : OpcodeHexagon {
- bits <3> Ii;
- let Inst{10-8} = Ii{2-0};
- bits <2> Qv4;
- let Inst{12-11} = Qv4{1-0};
- bits <5> Vs32;
- let Inst{4-0} = Vs32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_7315939 : OpcodeHexagon {
- bits <11> Ii;
- let Inst{21-20} = Ii{10-9};
- let Inst{7-1} = Ii{8-2};
- bits <4> Rs16;
- let Inst{19-16} = Rs16{3-0};
- bits <6> n1;
- let Inst{28-28} = n1{5-5};
- let Inst{24-22} = n1{4-2};
- let Inst{13-13} = n1{1-1};
- let Inst{8-8} = n1{0-0};
-}
-class Enc_15275738 : OpcodeHexagon {
- bits <12> Ii;
- let Inst{26-25} = Ii{11-10};
- let Inst{13-5} = Ii{9-1};
+class Enc_890909 : OpcodeHexagon {
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
bits <5> Rd32;
let Inst{4-0} = Rd32{4-0};
-}
-class Enc_12822813 : OpcodeHexagon {
- bits <5> Rss32;
- let Inst{20-16} = Rss32{4-0};
- bits <5> Rtt32;
- let Inst{12-8} = Rtt32{4-0};
- bits <5> Rxx32;
- let Inst{4-0} = Rxx32{4-0};
bits <2> Pe4;
let Inst{6-5} = Pe4{1-0};
}
-class Enc_10282127 : OpcodeHexagon {
- bits <7> Ii;
- let Inst{12-7} = Ii{6-1};
- bits <8> II;
- let Inst{13-13} = II{7-7};
- let Inst{6-0} = II{6-0};
+class Enc_527412 : OpcodeHexagon {
+ bits <2> Ps4;
+ let Inst{17-16} = Ps4{1-0};
+ bits <2> Pt4;
+ let Inst{9-8} = Pt4{1-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
+}
+class Enc_efaed8 : OpcodeHexagon {
+ bits <1> Ii;
+ let Inst{8-8} = Ii{0-0};
+}
+class Enc_a568d4 : OpcodeHexagon {
+ bits <5> Rt32;
+ let Inst{12-8} = Rt32{4-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
+ bits <5> Rx32;
+ let Inst{4-0} = Rx32{4-0};
}
-class Enc_14264243 : OpcodeHexagon {
+class Enc_27b757 : OpcodeHexagon {
+ bits <4> Ii;
+ let Inst{13-13} = Ii{3-3};
+ let Inst{10-8} = Ii{2-0};
+ bits <2> Pv4;
+ let Inst{12-11} = Pv4{1-0};
+ bits <5> Rt32;
+ let Inst{20-16} = Rt32{4-0};
+ bits <5> Vs32;
+ let Inst{4-0} = Vs32{4-0};
+}
+class Enc_5de85f : OpcodeHexagon {
bits <11> Ii;
let Inst{21-20} = Ii{10-9};
let Inst{7-1} = Ii{8-2};
- bits <4> Rs16;
- let Inst{19-16} = Rs16{3-0};
- bits <4> Rt16;
- let Inst{11-8} = Rt16{3-0};
-}
-class Enc_6778937 : OpcodeHexagon {
- bits <5> Rxx32;
- let Inst{20-16} = Rxx32{4-0};
- bits <0> sgp10;
-}
-class Enc_5480539 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{20-16} = Vu32{4-0};
- bits <5> Vv32;
- let Inst{12-8} = Vv32{4-0};
- bits <3> Rt8;
- let Inst{2-0} = Rt8{2-0};
- bits <5> Vxx32;
- let Inst{7-3} = Vxx32{4-0};
+ bits <5> Rt32;
+ let Inst{12-8} = Rt32{4-0};
+ bits <3> Ns8;
+ let Inst{18-16} = Ns8{2-0};
}
-class Enc_11422009 : OpcodeHexagon {
+class Enc_0e41fa : OpcodeHexagon {
+ bits <5> Vuu32;
+ let Inst{12-8} = Vuu32{4-0};
bits <5> Rt32;
let Inst{20-16} = Rt32{4-0};
- bits <5> Vy32;
- let Inst{12-8} = Vy32{4-0};
- bits <5> Vx32;
- let Inst{4-0} = Vx32{4-0};
-}
-class Enc_16357011 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{20-16} = Vu32{4-0};
- bits <5> Vv32;
- let Inst{8-4} = Vv32{4-0};
- bits <5> Vt32;
- let Inst{13-9} = Vt32{4-0};
- bits <4> Vdd16;
- let Inst{3-0} = Vdd16{3-0};
-}
-class Enc_4975051 : OpcodeHexagon {
- bits <19> Ii;
- let Inst{26-25} = Ii{18-17};
- let Inst{20-16} = Ii{16-12};
- let Inst{13-5} = Ii{11-3};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
-}
-class Enc_14786238 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Rtt32;
- let Inst{20-16} = Rtt32{4-0};
- bits <5> Vx32;
- let Inst{7-3} = Vx32{4-0};
-}
-class Enc_15472748 : OpcodeHexagon {
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <5> Rtt32;
- let Inst{12-8} = Rtt32{4-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
-}
-class Enc_6773159 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{12-7} = Ii{5-0};
- bits <5> II;
- let Inst{4-0} = II{4-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
+ bits <5> Vd32;
+ let Inst{4-0} = Vd32{4-0};
}
-class Enc_12535811 : OpcodeHexagon {
+class Enc_802dc0 : OpcodeHexagon {
+ bits <1> Ii;
+ let Inst{8-8} = Ii{0-0};
bits <2> Qv4;
let Inst{23-22} = Qv4{1-0};
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Vx32;
- let Inst{4-0} = Vx32{4-0};
}
-class Enc_14007201 : OpcodeHexagon {
- bits <8> Ii;
- let Inst{12-5} = Ii{7-0};
- bits <8> II;
- let Inst{22-16} = II{7-1};
- let Inst{13-13} = II{0-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
+class Enc_6b197f : OpcodeHexagon {
+ bits <4> Ii;
+ let Inst{8-5} = Ii{3-0};
+ bits <5> Ryy32;
+ let Inst{4-0} = Ryy32{4-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
}
-class Enc_2577026 : OpcodeHexagon {
- bits <3> Qt8;
- let Inst{2-0} = Qt8{2-0};
- bits <5> Vu32;
- let Inst{20-16} = Vu32{4-0};
- bits <5> Vv32;
- let Inst{12-8} = Vv32{4-0};
- bits <5> Vdd32;
- let Inst{7-3} = Vdd32{4-0};
+class Enc_1f5d8f : OpcodeHexagon {
+ bits <1> Mu2;
+ let Inst{13-13} = Mu2{0-0};
+ bits <5> Ryy32;
+ let Inst{4-0} = Ryy32{4-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
}
-class Enc_7305764 : OpcodeHexagon {
- bits <5> II;
- let Inst{12-8} = II{4-0};
- bits <11> Ii;
- let Inst{21-20} = Ii{10-9};
- let Inst{7-1} = Ii{8-2};
- bits <4> Rs16;
- let Inst{19-16} = Rs16{3-0};
+class Enc_51436c : OpcodeHexagon {
+ bits <16> Ii;
+ let Inst{23-22} = Ii{15-14};
+ let Inst{13-0} = Ii{13-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
}
-class Enc_11682941 : OpcodeHexagon {
- bits <19> Ii;
- let Inst{26-25} = Ii{18-17};
- let Inst{20-16} = Ii{16-12};
- let Inst{13-13} = Ii{11-11};
- let Inst{7-0} = Ii{10-3};
+class Enc_c7a204 : OpcodeHexagon {
+ bits <6> II;
+ let Inst{5-0} = II{5-0};
bits <5> Rtt32;
let Inst{12-8} = Rtt32{4-0};
+ bits <5> Re32;
+ let Inst{20-16} = Re32{4-0};
}
-class Enc_16376009 : OpcodeHexagon {
+class Enc_db40cd : OpcodeHexagon {
bits <6> Ii;
- let Inst{8-5} = Ii{5-2};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
+ let Inst{6-3} = Ii{5-2};
+ bits <5> Rt32;
+ let Inst{12-8} = Rt32{4-0};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_13249928 : OpcodeHexagon {
- bits <9> Ii;
- let Inst{13-5} = Ii{8-0};
+class Enc_a1e29d : OpcodeHexagon {
+ bits <5> Ii;
+ let Inst{12-8} = Ii{4-0};
+ bits <5> II;
+ let Inst{22-21} = II{4-3};
+ let Inst{7-5} = II{2-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <2> Pd4;
- let Inst{1-0} = Pd4{1-0};
+ bits <5> Rx32;
+ let Inst{4-0} = Rx32{4-0};
}
-class Enc_1971351 : OpcodeHexagon {
- bits <5> Ii;
- let Inst{8-5} = Ii{4-1};
+class Enc_d15d19 : OpcodeHexagon {
bits <1> Mu2;
let Inst{13-13} = Mu2{0-0};
- bits <5> Ryy32;
- let Inst{4-0} = Ryy32{4-0};
+ bits <5> Vs32;
+ let Inst{4-0} = Vs32{4-0};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_13715847 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{17-16} = Ii{5-4};
- let Inst{6-3} = Ii{3-0};
- bits <2> Pv4;
- let Inst{1-0} = Pv4{1-0};
- bits <5> Rtt32;
- let Inst{12-8} = Rtt32{4-0};
+class Enc_e90a15 : OpcodeHexagon {
+ bits <11> Ii;
+ let Inst{21-20} = Ii{10-9};
+ let Inst{7-1} = Ii{8-2};
+ bits <3> Ns8;
+ let Inst{18-16} = Ns8{2-0};
+ bits <4> n1;
+ let Inst{29-29} = n1{3-3};
+ let Inst{26-25} = n1{2-1};
+ let Inst{22-22} = n1{0-0};
}
-class Enc_13303422 : OpcodeHexagon {
- bits <5> Ii;
- let Inst{8-5} = Ii{4-1};
+class Enc_e0a47a : OpcodeHexagon {
+ bits <4> Ii;
+ let Inst{8-5} = Ii{3-0};
bits <1> Mu2;
let Inst{13-13} = Mu2{0-0};
bits <5> Rd32;
@@ -241,29 +148,32 @@ class Enc_13303422 : OpcodeHexagon {
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_14574598 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{13-8} = Ii{5-0};
+class Enc_140c83 : OpcodeHexagon {
+ bits <10> Ii;
+ let Inst{21-21} = Ii{9-9};
+ let Inst{13-5} = Ii{8-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <2> Pd4;
- let Inst{1-0} = Pd4{1-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
}
-class Enc_13094118 : OpcodeHexagon {
- bits <5> Css32;
- let Inst{20-16} = Css32{4-0};
+class Enc_7eee72 : OpcodeHexagon {
+ bits <1> Mu2;
+ let Inst{13-13} = Mu2{0-0};
bits <5> Rdd32;
let Inst{4-0} = Rdd32{4-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
}
-class Enc_4231995 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{13-8} = Ii{5-0};
- bits <5> Rss32;
- let Inst{20-16} = Rss32{4-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
+class Enc_d7dc10 : OpcodeHexagon {
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+ bits <5> Rtt32;
+ let Inst{12-8} = Rtt32{4-0};
+ bits <2> Pd4;
+ let Inst{1-0} = Pd4{1-0};
}
-class Enc_844699 : OpcodeHexagon {
+class Enc_736575 : OpcodeHexagon {
bits <11> Ii;
let Inst{21-20} = Ii{10-9};
let Inst{7-1} = Ii{8-2};
@@ -271,74 +181,87 @@ class Enc_844699 : OpcodeHexagon {
let Inst{19-16} = Rs16{3-0};
bits <4> n1;
let Inst{28-28} = n1{3-3};
- let Inst{24-22} = n1{2-0};
+ let Inst{25-23} = n1{2-0};
}
-class Enc_8752140 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{8-5} = Ii{5-2};
+class Enc_8dec2e : OpcodeHexagon {
+ bits <5> Ii;
+ let Inst{12-8} = Ii{4-0};
+ bits <5> Rss32;
+ let Inst{20-16} = Rss32{4-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
+}
+class Enc_eaa9f8 : OpcodeHexagon {
+ bits <5> Vu32;
+ let Inst{12-8} = Vu32{4-0};
+ bits <5> Vv32;
+ let Inst{20-16} = Vv32{4-0};
+ bits <2> Qx4;
+ let Inst{1-0} = Qx4{1-0};
+}
+class Enc_509701 : OpcodeHexagon {
+ bits <19> Ii;
+ let Inst{26-25} = Ii{18-17};
+ let Inst{20-16} = Ii{16-12};
+ let Inst{13-5} = Ii{11-3};
bits <5> Rdd32;
let Inst{4-0} = Rdd32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
}
-class Enc_7978128 : OpcodeHexagon {
- bits <1> Ii;
- let Inst{8-8} = Ii{0-0};
- bits <2> Qv4;
- let Inst{23-22} = Qv4{1-0};
+class Enc_830e5d : OpcodeHexagon {
+ bits <8> Ii;
+ let Inst{12-5} = Ii{7-0};
+ bits <8> II;
+ let Inst{22-16} = II{7-1};
+ let Inst{13-13} = II{0-0};
+ bits <2> Pu4;
+ let Inst{24-23} = Pu4{1-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
}
-class Enc_10492541 : OpcodeHexagon {
+class Enc_79b8c8 : OpcodeHexagon {
bits <6> Ii;
let Inst{6-3} = Ii{5-2};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_0 : OpcodeHexagon {
-}
-class Enc_15733946 : OpcodeHexagon {
- bits <2> Pv4;
- let Inst{12-11} = Pv4{1-0};
bits <1> Mu2;
let Inst{13-13} = Mu2{0-0};
- bits <5> Vs32;
- let Inst{4-0} = Vs32{4-0};
+ bits <5> Rt32;
+ let Inst{12-8} = Rt32{4-0};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_738356 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{13-13} = Ii{3-3};
+class Enc_58a8bf : OpcodeHexagon {
+ bits <3> Ii;
let Inst{10-8} = Ii{2-0};
bits <2> Pv4;
let Inst{12-11} = Pv4{1-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
bits <5> Vd32;
let Inst{4-0} = Vd32{4-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
}
-class Enc_14400220 : OpcodeHexagon {
- bits <5> Ii;
- let Inst{9-5} = Ii{4-0};
- bits <5> Rss32;
- let Inst{20-16} = Rss32{4-0};
- bits <2> Pd4;
- let Inst{1-0} = Pd4{1-0};
+class Enc_041d7b : OpcodeHexagon {
+ bits <11> Ii;
+ let Inst{21-20} = Ii{10-9};
+ let Inst{7-1} = Ii{8-2};
+ bits <4> Rs16;
+ let Inst{19-16} = Rs16{3-0};
+ bits <5> n1;
+ let Inst{28-28} = n1{4-4};
+ let Inst{24-23} = n1{3-2};
+ let Inst{13-13} = n1{1-1};
+ let Inst{8-8} = n1{0-0};
}
-class Enc_15194851 : OpcodeHexagon {
+class Enc_f44229 : OpcodeHexagon {
+ bits <7> Ii;
+ let Inst{13-13} = Ii{6-6};
+ let Inst{7-3} = Ii{5-1};
+ bits <2> Pv4;
+ let Inst{1-0} = Pv4{1-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
- bits <2> Pu4;
- let Inst{6-5} = Pu4{1-0};
- bits <5> Rx32;
- let Inst{4-0} = Rx32{4-0};
+ bits <3> Nt8;
+ let Inst{10-8} = Nt8{2-0};
}
-class Enc_14172170 : OpcodeHexagon {
- bits <1> Ii;
- let Inst{5-5} = Ii{0-0};
+class Enc_aad80c : OpcodeHexagon {
bits <5> Vuu32;
let Inst{12-8} = Vuu32{4-0};
bits <5> Rt32;
@@ -346,413 +269,269 @@ class Enc_14172170 : OpcodeHexagon {
bits <5> Vdd32;
let Inst{4-0} = Vdd32{4-0};
}
-class Enc_10065510 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{6-3} = Ii{5-2};
+class Enc_87c142 : OpcodeHexagon {
+ bits <7> Ii;
+ let Inst{8-4} = Ii{6-2};
+ bits <4> Rt16;
+ let Inst{3-0} = Rt16{3-0};
+}
+class Enc_86a14b : OpcodeHexagon {
+ bits <8> Ii;
+ let Inst{7-3} = Ii{7-3};
+ bits <3> Rdd8;
+ let Inst{2-0} = Rdd8{2-0};
+}
+class Enc_9a33d5 : OpcodeHexagon {
+ bits <7> Ii;
+ let Inst{6-3} = Ii{6-3};
bits <2> Pv4;
let Inst{1-0} = Pv4{1-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
+ bits <5> Rtt32;
+ let Inst{12-8} = Rtt32{4-0};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_14998517 : OpcodeHexagon {
- bits <11> Ii;
- let Inst{21-20} = Ii{10-9};
- let Inst{7-1} = Ii{8-2};
- bits <3> Ns8;
- let Inst{18-16} = Ns8{2-0};
- bits <3> n1;
- let Inst{29-29} = n1{2-2};
- let Inst{26-25} = n1{1-0};
+class Enc_a56825 : OpcodeHexagon {
+ bits <5> Rss32;
+ let Inst{20-16} = Rss32{4-0};
+ bits <5> Rtt32;
+ let Inst{12-8} = Rtt32{4-0};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
}
-class Enc_16657398 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{17-16} = Ii{5-4};
- let Inst{6-3} = Ii{3-0};
- bits <2> Pv4;
- let Inst{1-0} = Pv4{1-0};
+class Enc_9ea4cf : OpcodeHexagon {
+ bits <2> Ii;
+ let Inst{13-13} = Ii{1-1};
+ let Inst{6-6} = Ii{0-0};
+ bits <6> II;
+ let Inst{5-0} = II{5-0};
+ bits <5> Ru32;
+ let Inst{20-16} = Ru32{4-0};
bits <5> Rt32;
let Inst{12-8} = Rt32{4-0};
}
-class Enc_14620934 : OpcodeHexagon {
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
+class Enc_ee5ed0 : OpcodeHexagon {
+ bits <4> Rs16;
+ let Inst{7-4} = Rs16{3-0};
+ bits <4> Rd16;
+ let Inst{3-0} = Rd16{3-0};
+ bits <2> n1;
+ let Inst{9-8} = n1{1-0};
}
-class Enc_10075393 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{13-13} = Ii{3-3};
- let Inst{10-8} = Ii{2-0};
- bits <2> Pv4;
- let Inst{12-11} = Pv4{1-0};
+class Enc_935d9b : OpcodeHexagon {
+ bits <5> Ii;
+ let Inst{6-3} = Ii{4-1};
+ bits <1> Mu2;
+ let Inst{13-13} = Mu2{0-0};
bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <5> Vs32;
- let Inst{4-0} = Vs32{4-0};
-}
-class Enc_8638014 : OpcodeHexagon {
- bits <16> Ii;
- let Inst{21-21} = Ii{15-15};
- let Inst{13-8} = Ii{14-9};
- let Inst{2-0} = Ii{8-6};
- bits <5> Vss32;
- let Inst{7-3} = Vss32{4-0};
+ let Inst{12-8} = Rt32{4-0};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_13261538 : OpcodeHexagon {
- bits <3> Ii;
- let Inst{7-5} = Ii{2-0};
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Vv32;
- let Inst{20-16} = Vv32{4-0};
- bits <5> Vdd32;
- let Inst{4-0} = Vdd32{4-0};
+class Enc_61f0b0 : OpcodeHexagon {
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+ bits <5> Rt32;
+ let Inst{12-8} = Rt32{4-0};
+ bits <5> Rxx32;
+ let Inst{4-0} = Rxx32{4-0};
}
-class Enc_8990840 : OpcodeHexagon {
- bits <13> Ii;
- let Inst{26-25} = Ii{12-11};
- let Inst{13-5} = Ii{10-2};
+class Enc_bd6011 : OpcodeHexagon {
+ bits <5> Rt32;
+ let Inst{12-8} = Rt32{4-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
bits <5> Rd32;
let Inst{4-0} = Rd32{4-0};
}
-class Enc_5974204 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{20-16} = Vu32{4-0};
- bits <5> Vvv32;
- let Inst{12-8} = Vvv32{4-0};
- bits <5> Vd32;
- let Inst{7-3} = Vd32{4-0};
+class Enc_65d691 : OpcodeHexagon {
+ bits <2> Ps4;
+ let Inst{17-16} = Ps4{1-0};
+ bits <2> Pd4;
+ let Inst{1-0} = Pd4{1-0};
}
-class Enc_4711514 : OpcodeHexagon {
- bits <2> Qu4;
- let Inst{9-8} = Qu4{1-0};
+class Enc_e8c45e : OpcodeHexagon {
+ bits <7> Ii;
+ let Inst{13-13} = Ii{6-6};
+ let Inst{7-3} = Ii{5-1};
+ bits <2> Pv4;
+ let Inst{1-0} = Pv4{1-0};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <5> Vd32;
- let Inst{4-0} = Vd32{4-0};
+ let Inst{12-8} = Rt32{4-0};
}
-class Enc_11492529 : OpcodeHexagon {
- bits <5> Ii;
- let Inst{6-3} = Ii{4-1};
+class Enc_ca3887 : OpcodeHexagon {
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
bits <5> Rt32;
let Inst{12-8} = Rt32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
}
-class Enc_9277990 : OpcodeHexagon {
- bits <5> Rss32;
- let Inst{20-16} = Rss32{4-0};
- bits <5> Rtt32;
- let Inst{12-8} = Rtt32{4-0};
+class Enc_a94f3b : OpcodeHexagon {
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+ bits <5> Rt32;
+ let Inst{12-8} = Rt32{4-0};
bits <5> Rd32;
let Inst{4-0} = Rd32{4-0};
+ bits <2> Pe4;
+ let Inst{6-5} = Pe4{1-0};
}
-class Enc_6690615 : OpcodeHexagon {
- bits <7> Ii;
- let Inst{8-4} = Ii{6-2};
+class Enc_625deb : OpcodeHexagon {
+ bits <4> Ii;
+ let Inst{10-8} = Ii{3-1};
+ bits <4> Rs16;
+ let Inst{7-4} = Rs16{3-0};
bits <4> Rt16;
let Inst{3-0} = Rt16{3-0};
}
-class Enc_1220199 : OpcodeHexagon {
- bits <2> Qv4;
- let Inst{23-22} = Qv4{1-0};
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Vd32;
- let Inst{4-0} = Vd32{4-0};
-}
-class Enc_7785569 : OpcodeHexagon {
- bits <11> Ii;
- let Inst{21-20} = Ii{10-9};
- let Inst{7-1} = Ii{8-2};
- bits <4> Rs16;
- let Inst{19-16} = Rs16{3-0};
- bits <6> n1;
- let Inst{28-28} = n1{5-5};
- let Inst{25-22} = n1{4-1};
- let Inst{8-8} = n1{0-0};
+class Enc_1f5ba6 : OpcodeHexagon {
+ bits <4> Rd16;
+ let Inst{3-0} = Rd16{3-0};
}
-class Enc_2880796 : OpcodeHexagon {
- bits <5> Ii;
- let Inst{12-8} = Ii{4-0};
- bits <5> II;
- let Inst{22-21} = II{4-3};
- let Inst{7-5} = II{2-0};
+class Enc_cd82bc : OpcodeHexagon {
+ bits <4> Ii;
+ let Inst{21-21} = Ii{3-3};
+ let Inst{7-5} = Ii{2-0};
+ bits <6> II;
+ let Inst{13-8} = II{5-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
bits <5> Rx32;
let Inst{4-0} = Rx32{4-0};
}
-class Enc_6858527 : OpcodeHexagon {
- bits <2> Qs4;
- let Inst{6-5} = Qs4{1-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
- bits <5> Vv32;
- let Inst{4-0} = Vv32{4-0};
-}
-class Enc_11863656 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Rtt32;
- let Inst{20-16} = Rtt32{4-0};
- bits <5> Vx32;
- let Inst{4-0} = Vx32{4-0};
-}
-class Enc_151014 : OpcodeHexagon {
- bits <5> Rss32;
- let Inst{20-16} = Rss32{4-0};
- bits <5> Rtt32;
- let Inst{12-8} = Rtt32{4-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
- bits <2> Px4;
- let Inst{6-5} = Px4{1-0};
-}
-class Enc_10333841 : OpcodeHexagon {
- bits <16> Ii;
- let Inst{21-21} = Ii{15-15};
- let Inst{13-8} = Ii{14-9};
- let Inst{2-0} = Ii{8-6};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <5> Vd32;
- let Inst{7-3} = Vd32{4-0};
+class Enc_399e12 : OpcodeHexagon {
+ bits <4> Rs16;
+ let Inst{7-4} = Rs16{3-0};
+ bits <3> Rdd8;
+ let Inst{2-0} = Rdd8{2-0};
}
-class Enc_14044877 : OpcodeHexagon {
+class Enc_d7a65e : OpcodeHexagon {
bits <6> Ii;
- let Inst{13-13} = Ii{5-5};
- let Inst{7-3} = Ii{4-0};
+ let Inst{12-7} = Ii{5-0};
+ bits <6> II;
+ let Inst{13-13} = II{5-5};
+ let Inst{4-0} = II{4-0};
bits <2> Pv4;
- let Inst{1-0} = Pv4{1-0};
+ let Inst{6-5} = Pv4{1-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
-}
-class Enc_13691337 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Vv32;
- let Inst{20-16} = Vv32{4-0};
- bits <5> Vd32;
- let Inst{4-0} = Vd32{4-0};
- bits <2> Qx4;
- let Inst{6-5} = Qx4{1-0};
}
-class Enc_3817033 : OpcodeHexagon {
- bits <5> Vuu32;
- let Inst{20-16} = Vuu32{4-0};
- bits <3> Qt8;
- let Inst{10-8} = Qt8{2-0};
- bits <5> Vdd32;
- let Inst{7-3} = Vdd32{4-0};
+class Enc_607661 : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{12-7} = Ii{5-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
}
-class Enc_3540372 : OpcodeHexagon {
- bits <5> Rtt32;
- let Inst{20-16} = Rtt32{4-0};
- bits <5> Vd32;
- let Inst{7-3} = Vd32{4-0};
+class Enc_6a5972 : OpcodeHexagon {
+ bits <11> Ii;
+ let Inst{21-20} = Ii{10-9};
+ let Inst{7-1} = Ii{8-2};
+ bits <4> Rs16;
+ let Inst{19-16} = Rs16{3-0};
+ bits <4> Rt16;
+ let Inst{11-8} = Rt16{3-0};
}
-class Enc_5200852 : OpcodeHexagon {
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
- bits <5> Vd32;
- let Inst{7-3} = Vd32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
+class Enc_53dca9 : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{11-8} = Ii{5-2};
+ bits <4> Rs16;
+ let Inst{7-4} = Rs16{3-0};
+ bits <4> Rd16;
+ let Inst{3-0} = Rd16{3-0};
}
-class Enc_15949334 : OpcodeHexagon {
+class Enc_27fd0e : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{8-5} = Ii{5-2};
bits <1> Mu2;
let Inst{13-13} = Mu2{0-0};
- bits <5> Vd32;
- let Inst{4-0} = Vd32{4-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_3831744 : OpcodeHexagon {
- bits <5> Rss32;
- let Inst{20-16} = Rss32{4-0};
- bits <5> Rtt32;
- let Inst{12-8} = Rtt32{4-0};
- bits <2> Pd4;
- let Inst{1-0} = Pd4{1-0};
-}
-class Enc_8280533 : OpcodeHexagon {
- bits <3> Ii;
- let Inst{7-5} = Ii{2-0};
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Vv32;
- let Inst{20-16} = Vv32{4-0};
- bits <5> Vx32;
- let Inst{4-0} = Vx32{4-0};
-}
-class Enc_10969213 : OpcodeHexagon {
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
- bits <5> Vvv32;
- let Inst{12-8} = Vvv32{4-0};
- bits <5> Vw32;
- let Inst{4-0} = Vw32{4-0};
-}
-class Enc_3974695 : OpcodeHexagon {
+class Enc_93af4c : OpcodeHexagon {
bits <7> Ii;
let Inst{10-4} = Ii{6-0};
bits <4> Rx16;
let Inst{3-0} = Rx16{3-0};
}
-class Enc_7255914 : OpcodeHexagon {
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
+class Enc_5bdd42 : OpcodeHexagon {
+ bits <7> Ii;
+ let Inst{8-5} = Ii{6-3};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_7212930 : OpcodeHexagon {
- bits <5> Ii;
- let Inst{8-5} = Ii{4-1};
- bits <2> Pt4;
- let Inst{10-9} = Pt4{1-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
+class Enc_71f1b4 : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{8-5} = Ii{5-2};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_12781442 : OpcodeHexagon {
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <2> Qd4;
- let Inst{1-0} = Qd4{1-0};
-}
-class Enc_799555 : OpcodeHexagon {
- bits <5> Vd32;
- let Inst{7-3} = Vd32{4-0};
-}
-class Enc_11083408 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Vv32;
- let Inst{23-19} = Vv32{4-0};
- bits <3> Rt8;
- let Inst{18-16} = Rt8{2-0};
- bits <5> Vd32;
- let Inst{4-0} = Vd32{4-0};
-}
-class Enc_900013 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Vd32;
- let Inst{4-0} = Vd32{4-0};
-}
-class Enc_9487067 : OpcodeHexagon {
- bits <12> Ii;
- let Inst{19-16} = Ii{11-8};
- let Inst{12-5} = Ii{7-0};
- bits <2> Pu4;
- let Inst{22-21} = Pu4{1-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
-}
-class Enc_16014536 : OpcodeHexagon {
- bits <10> Ii;
- let Inst{21-21} = Ii{9-9};
- let Inst{13-5} = Ii{8-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <2> Pd4;
- let Inst{1-0} = Pd4{1-0};
-}
-class Enc_12419313 : OpcodeHexagon {
+class Enc_14640c : OpcodeHexagon {
bits <11> Ii;
let Inst{21-20} = Ii{10-9};
let Inst{7-1} = Ii{8-2};
bits <4> Rs16;
let Inst{19-16} = Rs16{3-0};
- bits <4> n1;
- let Inst{28-28} = n1{3-3};
- let Inst{24-23} = n1{2-1};
+ bits <5> n1;
+ let Inst{28-28} = n1{4-4};
+ let Inst{24-22} = n1{3-1};
let Inst{13-13} = n1{0-0};
}
-class Enc_5503430 : OpcodeHexagon {
- bits <5> Vuu32;
- let Inst{12-8} = Vuu32{4-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <5> Vdd32;
- let Inst{7-3} = Vdd32{4-0};
-}
-class Enc_14767681 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Vv32;
- let Inst{23-19} = Vv32{4-0};
- bits <3> Rt8;
- let Inst{18-16} = Rt8{2-0};
- bits <5> Vdd32;
- let Inst{4-0} = Vdd32{4-0};
-}
-class Enc_9093094 : OpcodeHexagon {
- bits <8> Ii;
- let Inst{12-5} = Ii{7-0};
- bits <8> II;
- let Inst{22-16} = II{7-1};
- let Inst{13-13} = II{0-0};
- bits <2> Pu4;
- let Inst{24-23} = Pu4{1-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
-}
-class Enc_11542684 : OpcodeHexagon {
- bits <16> Ii;
- let Inst{27-21} = Ii{15-9};
- let Inst{13-5} = Ii{8-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
-}
-class Enc_8877260 : OpcodeHexagon {
+class Enc_31db33 : OpcodeHexagon {
+ bits <2> Qt4;
+ let Inst{6-5} = Qt4{1-0};
bits <5> Vu32;
let Inst{12-8} = Vu32{4-0};
bits <5> Vv32;
- let Inst{23-19} = Vv32{4-0};
- bits <3> Rt8;
- let Inst{18-16} = Rt8{2-0};
- bits <5> Vx32;
- let Inst{4-0} = Vx32{4-0};
+ let Inst{20-16} = Vv32{4-0};
+ bits <5> Vd32;
+ let Inst{4-0} = Vd32{4-0};
}
-class Enc_1737833 : OpcodeHexagon {
+class Enc_65f095 : OpcodeHexagon {
bits <6> Ii;
- let Inst{13-13} = Ii{5-5};
- let Inst{7-3} = Ii{4-0};
+ let Inst{6-3} = Ii{5-2};
bits <2> Pv4;
let Inst{1-0} = Pv4{1-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
bits <3> Nt8;
let Inst{10-8} = Nt8{2-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
}
-class Enc_255516 : OpcodeHexagon {
- bits <5> Vuu32;
- let Inst{20-16} = Vuu32{4-0};
- bits <5> Vv32;
- let Inst{12-8} = Vv32{4-0};
- bits <5> Vdd32;
- let Inst{7-3} = Vdd32{4-0};
+class Enc_784502 : OpcodeHexagon {
+ bits <3> Ii;
+ let Inst{10-8} = Ii{2-0};
+ bits <2> Pv4;
+ let Inst{12-11} = Pv4{1-0};
+ bits <3> Os8;
+ let Inst{2-0} = Os8{2-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
}
-class Enc_10721363 : OpcodeHexagon {
+class Enc_6413b6 : OpcodeHexagon {
+ bits <11> Ii;
+ let Inst{21-20} = Ii{10-9};
+ let Inst{7-1} = Ii{8-2};
+ bits <3> Ns8;
+ let Inst{18-16} = Ns8{2-0};
+ bits <5> n1;
+ let Inst{29-29} = n1{4-4};
+ let Inst{26-25} = n1{3-2};
+ let Inst{23-23} = n1{1-1};
+ let Inst{13-13} = n1{0-0};
+}
+class Enc_7a0ea6 : OpcodeHexagon {
+ bits <4> Rd16;
+ let Inst{3-0} = Rd16{3-0};
+ bits <1> n1;
+ let Inst{9-9} = n1{0-0};
+}
+class Enc_84bff1 : OpcodeHexagon {
bits <2> Ii;
let Inst{13-13} = Ii{1-1};
let Inst{7-7} = Ii{0-0};
@@ -760,90 +539,138 @@ class Enc_10721363 : OpcodeHexagon {
let Inst{20-16} = Rs32{4-0};
bits <5> Rt32;
let Inst{12-8} = Rt32{4-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
}
-class Enc_7076358 : OpcodeHexagon {
- bits <5> Zdd8;
- let Inst{4-0} = Zdd8{4-0};
+class Enc_74aef2 : OpcodeHexagon {
+ bits <4> Ii;
+ let Inst{8-5} = Ii{3-0};
+ bits <1> Mu2;
+ let Inst{13-13} = Mu2{0-0};
+ bits <5> Ryy32;
+ let Inst{4-0} = Ryy32{4-0};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_11930928 : OpcodeHexagon {
- bits <5> Ii;
- let Inst{12-8} = Ii{4-0};
- bits <5> II;
- let Inst{22-21} = II{4-3};
- let Inst{7-5} = II{2-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
+class Enc_78e566 : OpcodeHexagon {
+ bits <2> Pt4;
+ let Inst{9-8} = Pt4{1-0};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
}
-class Enc_2410156 : OpcodeHexagon {
- bits <5> Ii;
- let Inst{12-8} = Ii{4-0};
+class Enc_437f33 : OpcodeHexagon {
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
+ bits <5> Rt32;
+ let Inst{12-8} = Rt32{4-0};
+ bits <2> Pu4;
+ let Inst{6-5} = Pu4{1-0};
bits <5> Rx32;
let Inst{4-0} = Rx32{4-0};
}
-class Enc_6735062 : OpcodeHexagon {
- bits <2> Ps4;
- let Inst{17-16} = Ps4{1-0};
- bits <2> Pt4;
- let Inst{9-8} = Pt4{1-0};
+class Enc_0527db : OpcodeHexagon {
+ bits <4> Rs16;
+ let Inst{7-4} = Rs16{3-0};
+ bits <4> Rx16;
+ let Inst{3-0} = Rx16{3-0};
+}
+class Enc_420cf3 : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{22-21} = Ii{5-4};
+ let Inst{13-13} = Ii{3-3};
+ let Inst{7-5} = Ii{2-0};
+ bits <5> Ru32;
+ let Inst{4-0} = Ru32{4-0};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
+ let Inst{12-8} = Rd32{4-0};
}
-class Enc_7965855 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{20-16} = Vu32{4-0};
- bits <5> Vv32;
- let Inst{12-8} = Vv32{4-0};
- bits <5> Vd32;
- let Inst{7-3} = Vd32{4-0};
+class Enc_e39bb2 : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{9-4} = Ii{5-0};
+ bits <4> Rd16;
+ let Inst{3-0} = Rd16{3-0};
}
-class Enc_5202340 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Vyy32;
- let Inst{4-0} = Vyy32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
+class Enc_1b64fb : OpcodeHexagon {
+ bits <16> Ii;
+ let Inst{26-25} = Ii{15-14};
+ let Inst{20-16} = Ii{13-9};
+ let Inst{13-13} = Ii{8-8};
+ let Inst{7-0} = Ii{7-0};
+ bits <5> Rt32;
+ let Inst{12-8} = Rt32{4-0};
}
-class Enc_10568534 : OpcodeHexagon {
- bits <8> Ii;
- let Inst{12-5} = Ii{7-0};
- bits <2> Pu4;
- let Inst{22-21} = Pu4{1-0};
+class Enc_c6220b : OpcodeHexagon {
+ bits <2> Ii;
+ let Inst{13-13} = Ii{1-1};
+ let Inst{7-7} = Ii{0-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
+ bits <5> Ru32;
+ let Inst{12-8} = Ru32{4-0};
+ bits <3> Nt8;
+ let Inst{2-0} = Nt8{2-0};
}
-class Enc_16730127 : OpcodeHexagon {
- bits <3> Ii;
+class Enc_322e1b : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{22-21} = Ii{5-4};
+ let Inst{13-13} = Ii{3-3};
let Inst{7-5} = Ii{2-0};
- bits <5> Rss32;
- let Inst{20-16} = Rss32{4-0};
- bits <5> Rtt32;
- let Inst{12-8} = Rtt32{4-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
-}
-class Enc_11224149 : OpcodeHexagon {
- bits <8> Ii;
- let Inst{13-13} = Ii{7-7};
- let Inst{7-3} = Ii{6-2};
- bits <2> Pv4;
- let Inst{1-0} = Pv4{1-0};
+ bits <6> II;
+ let Inst{23-23} = II{5-5};
+ let Inst{4-0} = II{4-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
+ bits <5> Rd32;
+ let Inst{12-8} = Rd32{4-0};
+}
+class Enc_989021 : OpcodeHexagon {
+ bits <5> Rt32;
+ let Inst{20-16} = Rt32{4-0};
+ bits <5> Vy32;
+ let Inst{12-8} = Vy32{4-0};
+ bits <5> Vx32;
+ let Inst{4-0} = Vx32{4-0};
+}
+class Enc_178717 : OpcodeHexagon {
+ bits <11> Ii;
+ let Inst{21-20} = Ii{10-9};
+ let Inst{7-1} = Ii{8-2};
+ bits <4> Rs16;
+ let Inst{19-16} = Rs16{3-0};
+ bits <6> n1;
+ let Inst{28-28} = n1{5-5};
+ let Inst{25-23} = n1{4-2};
+ let Inst{13-13} = n1{1-1};
+ let Inst{8-8} = n1{0-0};
+}
+class Enc_78cbf0 : OpcodeHexagon {
+ bits <18> Ii;
+ let Inst{26-25} = Ii{17-16};
+ let Inst{20-16} = Ii{15-11};
+ let Inst{13-13} = Ii{10-10};
+ let Inst{7-0} = Ii{9-2};
bits <3> Nt8;
let Inst{10-8} = Nt8{2-0};
}
-class Enc_9772987 : OpcodeHexagon {
+class Enc_052c7d : OpcodeHexagon {
+ bits <5> Ii;
+ let Inst{6-3} = Ii{4-1};
+ bits <5> Rt32;
+ let Inst{12-8} = Rt32{4-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
+}
+class Enc_fcf7a7 : OpcodeHexagon {
+ bits <5> Rss32;
+ let Inst{20-16} = Rss32{4-0};
+ bits <5> Rtt32;
+ let Inst{12-8} = Rtt32{4-0};
+ bits <2> Pd4;
+ let Inst{1-0} = Pd4{1-0};
+}
+class Enc_55355c : OpcodeHexagon {
bits <2> Ii;
let Inst{13-13} = Ii{1-1};
let Inst{7-7} = Ii{0-0};
@@ -854,342 +681,259 @@ class Enc_9772987 : OpcodeHexagon {
bits <5> Rtt32;
let Inst{4-0} = Rtt32{4-0};
}
-class Enc_9238139 : OpcodeHexagon {
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
- bits <5> Zdd8;
- let Inst{4-0} = Zdd8{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_2082775 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{11-8} = Ii{3-0};
- bits <5> Rss32;
- let Inst{20-16} = Rss32{4-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
-}
-class Enc_5790679 : OpcodeHexagon {
- bits <9> Ii;
- let Inst{12-8} = Ii{8-4};
- let Inst{4-3} = Ii{3-2};
+class Enc_211aaa : OpcodeHexagon {
+ bits <11> Ii;
+ let Inst{26-25} = Ii{10-9};
+ let Inst{13-5} = Ii{8-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
}
-class Enc_9305257 : OpcodeHexagon {
- bits <5> Zu8;
- let Inst{12-8} = Zu8{4-0};
- bits <5> Vd32;
- let Inst{4-0} = Vd32{4-0};
-}
-class Enc_3735566 : OpcodeHexagon {
- bits <3> Ii;
- let Inst{10-8} = Ii{2-0};
- bits <2> Pv4;
- let Inst{12-11} = Pv4{1-0};
- bits <3> Os8;
- let Inst{2-0} = Os8{2-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_12654528 : OpcodeHexagon {
- bits <2> Qs4;
- let Inst{6-5} = Qs4{1-0};
+class Enc_6185fe : OpcodeHexagon {
+ bits <2> Ii;
+ let Inst{13-13} = Ii{1-1};
+ let Inst{7-7} = Ii{0-0};
+ bits <6> II;
+ let Inst{11-8} = II{5-2};
+ let Inst{6-5} = II{1-0};
bits <5> Rt32;
let Inst{20-16} = Rt32{4-0};
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
- bits <5> Vvv32;
- let Inst{4-0} = Vvv32{4-0};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
}
-class Enc_15290236 : OpcodeHexagon {
+class Enc_cd4705 : OpcodeHexagon {
+ bits <3> Ii;
+ let Inst{7-5} = Ii{2-0};
bits <5> Vu32;
let Inst{12-8} = Vu32{4-0};
bits <5> Vv32;
let Inst{20-16} = Vv32{4-0};
- bits <5> Vdd32;
- let Inst{4-0} = Vdd32{4-0};
+ bits <5> Vx32;
+ let Inst{4-0} = Vx32{4-0};
}
-class Enc_11139981 : OpcodeHexagon {
- bits <2> Ps4;
- let Inst{17-16} = Ps4{1-0};
+class Enc_2ebe3b : OpcodeHexagon {
+ bits <1> Mu2;
+ let Inst{13-13} = Mu2{0-0};
+ bits <5> Vd32;
+ let Inst{4-0} = Vd32{4-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
+}
+class Enc_3d5b28 : OpcodeHexagon {
+ bits <5> Rss32;
+ let Inst{20-16} = Rss32{4-0};
+ bits <5> Rt32;
+ let Inst{12-8} = Rt32{4-0};
bits <5> Rd32;
let Inst{4-0} = Rd32{4-0};
}
-class Enc_15546666 : OpcodeHexagon {
- bits <9> Ii;
- let Inst{10-8} = Ii{8-6};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
+class Enc_5ab2be : OpcodeHexagon {
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+ bits <5> Rt32;
+ let Inst{12-8} = Rt32{4-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
}
-class Enc_486163 : OpcodeHexagon {
- bits <2> Ii;
- let Inst{13-13} = Ii{1-1};
- let Inst{7-7} = Ii{0-0};
- bits <6> II;
- let Inst{11-8} = II{5-2};
- let Inst{6-5} = II{1-0};
+class Enc_fef969 : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{20-16} = Ii{5-1};
+ let Inst{5-5} = Ii{0-0};
bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
+ let Inst{12-8} = Rt32{4-0};
bits <5> Rd32;
let Inst{4-0} = Rd32{4-0};
}
-class Enc_2079016 : OpcodeHexagon {
+class Enc_63eaeb : OpcodeHexagon {
bits <2> Ii;
let Inst{1-0} = Ii{1-0};
bits <4> Rs16;
let Inst{7-4} = Rs16{3-0};
}
-class Enc_10095813 : OpcodeHexagon {
+class Enc_95441f : OpcodeHexagon {
bits <5> Vu32;
let Inst{12-8} = Vu32{4-0};
- bits <5> Rtt32;
- let Inst{20-16} = Rtt32{4-0};
- bits <5> Vdd32;
- let Inst{4-0} = Vdd32{4-0};
-}
-class Enc_13133322 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{20-16} = Vu32{4-0};
- bits <5> Vx32;
- let Inst{7-3} = Vx32{4-0};
-}
-class Enc_9422954 : OpcodeHexagon {
- bits <2> Pu4;
- let Inst{9-8} = Pu4{1-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
+ bits <5> Vv32;
+ let Inst{20-16} = Vv32{4-0};
+ bits <2> Qd4;
+ let Inst{1-0} = Qd4{1-0};
}
-class Enc_10642833 : OpcodeHexagon {
+class Enc_372c9d : OpcodeHexagon {
+ bits <2> Pv4;
+ let Inst{12-11} = Pv4{1-0};
bits <1> Mu2;
let Inst{13-13} = Mu2{0-0};
- bits <5> Vs32;
- let Inst{7-3} = Vs32{4-0};
+ bits <3> Os8;
+ let Inst{2-0} = Os8{2-0};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_14989332 : OpcodeHexagon {
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
+class Enc_4dff07 : OpcodeHexagon {
+ bits <2> Qv4;
+ let Inst{12-11} = Qv4{1-0};
bits <1> Mu2;
let Inst{13-13} = Mu2{0-0};
- bits <5> Vv32;
- let Inst{4-0} = Vv32{4-0};
-}
-class Enc_10263630 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{20-16} = Vu32{4-0};
- bits <5> Vv32;
- let Inst{12-8} = Vv32{4-0};
- bits <3> Rt8;
- let Inst{2-0} = Rt8{2-0};
- bits <5> Vx32;
- let Inst{7-3} = Vx32{4-0};
+ bits <5> Vs32;
+ let Inst{4-0} = Vs32{4-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
}
-class Enc_13937564 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{13-13} = Ii{3-3};
- let Inst{10-8} = Ii{2-0};
- bits <2> Pv4;
- let Inst{12-11} = Pv4{1-0};
+class Enc_04c959 : OpcodeHexagon {
+ bits <2> Ii;
+ let Inst{13-13} = Ii{1-1};
+ let Inst{7-7} = Ii{0-0};
+ bits <6> II;
+ let Inst{11-8} = II{5-2};
+ let Inst{6-5} = II{1-0};
bits <5> Rt32;
let Inst{20-16} = Rt32{4-0};
- bits <3> Os8;
- let Inst{2-0} = Os8{2-0};
+ bits <5> Ryy32;
+ let Inst{4-0} = Ryy32{4-0};
}
-class Enc_7171569 : OpcodeHexagon {
+class Enc_b62ef7 : OpcodeHexagon {
bits <3> Ii;
- let Inst{7-5} = Ii{2-0};
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Vv32;
- let Inst{20-16} = Vv32{4-0};
- bits <5> Vd32;
- let Inst{4-0} = Vd32{4-0};
+ let Inst{10-8} = Ii{2-0};
+ bits <5> Vs32;
+ let Inst{4-0} = Vs32{4-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
}
-class Enc_2702036 : OpcodeHexagon {
- bits <10> Ii;
- let Inst{21-21} = Ii{9-9};
- let Inst{13-5} = Ii{8-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
+class Enc_2b518f : OpcodeHexagon {
+ bits <32> Ii;
+ let Inst{27-16} = Ii{31-20};
+ let Inst{13-0} = Ii{19-6};
}
-class Enc_1928953 : OpcodeHexagon {
- bits <2> Pu4;
- let Inst{9-8} = Pu4{1-0};
+class Enc_b388cf : OpcodeHexagon {
+ bits <5> Ii;
+ let Inst{12-8} = Ii{4-0};
+ bits <5> II;
+ let Inst{22-21} = II{4-3};
+ let Inst{7-5} = II{2-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
}
-class Enc_5853469 : OpcodeHexagon {
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
+class Enc_ad1c74 : OpcodeHexagon {
+ bits <11> Ii;
+ let Inst{21-20} = Ii{10-9};
+ let Inst{7-1} = Ii{8-2};
+ bits <4> Rs16;
+ let Inst{19-16} = Rs16{3-0};
+}
+class Enc_74d4e5 : OpcodeHexagon {
+ bits <1> Mu2;
+ let Inst{13-13} = Mu2{0-0};
bits <5> Rd32;
let Inst{4-0} = Rd32{4-0};
- bits <2> Pe4;
- let Inst{6-5} = Pe4{1-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
}
-class Enc_7692963 : OpcodeHexagon {
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
+class Enc_c90aca : OpcodeHexagon {
+ bits <8> Ii;
+ let Inst{12-5} = Ii{7-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
bits <5> Rx32;
let Inst{4-0} = Rx32{4-0};
}
-class Enc_15140689 : OpcodeHexagon {
+class Enc_222336 : OpcodeHexagon {
+ bits <4> Ii;
+ let Inst{8-5} = Ii{3-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
+}
+class Enc_5e87ce : OpcodeHexagon {
+ bits <16> Ii;
+ let Inst{23-22} = Ii{15-14};
+ let Inst{20-16} = Ii{13-9};
+ let Inst{13-5} = Ii{8-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
+}
+class Enc_f7ea77 : OpcodeHexagon {
bits <11> Ii;
let Inst{21-20} = Ii{10-9};
let Inst{7-1} = Ii{8-2};
bits <3> Ns8;
let Inst{18-16} = Ns8{2-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
-}
-class Enc_748676 : OpcodeHexagon {
- bits <12> Ii;
- let Inst{26-25} = Ii{11-10};
- let Inst{13-13} = Ii{9-9};
- let Inst{7-0} = Ii{8-1};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <3> Nt8;
- let Inst{10-8} = Nt8{2-0};
-}
-class Enc_3372766 : OpcodeHexagon {
- bits <5> Ii;
- let Inst{8-5} = Ii{4-1};
- bits <5> Ryy32;
- let Inst{4-0} = Ryy32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
+ bits <4> n1;
+ let Inst{29-29} = n1{3-3};
+ let Inst{26-25} = n1{2-1};
+ let Inst{13-13} = n1{0-0};
}
-class Enc_7900405 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{6-3} = Ii{5-2};
- bits <3> Nt8;
- let Inst{10-8} = Nt8{2-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
+class Enc_245865 : OpcodeHexagon {
+ bits <5> Vu32;
+ let Inst{12-8} = Vu32{4-0};
+ bits <5> Vv32;
+ let Inst{23-19} = Vv32{4-0};
+ bits <3> Rt8;
+ let Inst{18-16} = Rt8{2-0};
+ bits <5> Vx32;
+ let Inst{4-0} = Vx32{4-0};
}
-class Enc_11930027 : OpcodeHexagon {
- bits <12> Ii;
- let Inst{26-25} = Ii{11-10};
- let Inst{13-5} = Ii{9-1};
+class Enc_88d4d9 : OpcodeHexagon {
+ bits <2> Pu4;
+ let Inst{9-8} = Pu4{1-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Ryy32;
- let Inst{4-0} = Ryy32{4-0};
}
-class Enc_971574 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{22-21} = Ii{5-4};
- let Inst{13-13} = Ii{3-3};
- let Inst{7-5} = Ii{2-0};
- bits <6> II;
- let Inst{23-23} = II{5-5};
- let Inst{4-0} = II{4-0};
+class Enc_c0cdde : OpcodeHexagon {
+ bits <9> Ii;
+ let Inst{13-5} = Ii{8-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Rd32;
- let Inst{12-8} = Rd32{4-0};
-}
-class Enc_13453446 : OpcodeHexagon {
- bits <24> Ii;
- let Inst{24-16} = Ii{23-15};
- let Inst{13-1} = Ii{14-2};
+ bits <2> Pd4;
+ let Inst{1-0} = Pd4{1-0};
}
-class Enc_6356866 : OpcodeHexagon {
- bits <10> Ii;
- let Inst{21-21} = Ii{9-9};
- let Inst{13-5} = Ii{8-0};
+class Enc_226535 : OpcodeHexagon {
+ bits <8> Ii;
+ let Inst{12-7} = Ii{7-2};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Rx32;
- let Inst{4-0} = Rx32{4-0};
-}
-class Enc_16246706 : OpcodeHexagon {
- bits <5> Vdd32;
- let Inst{7-3} = Vdd32{4-0};
+ bits <5> Rt32;
+ let Inst{4-0} = Rt32{4-0};
}
-class Enc_5326450 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{6-3} = Ii{3-0};
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
+class Enc_31aa6a : OpcodeHexagon {
+ bits <5> Ii;
+ let Inst{6-3} = Ii{4-1};
+ bits <2> Pv4;
+ let Inst{1-0} = Pv4{1-0};
bits <3> Nt8;
let Inst{10-8} = Nt8{2-0};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_11687333 : OpcodeHexagon {
- bits <5> Rtt32;
- let Inst{12-8} = Rtt32{4-0};
- bits <5> Rss32;
- let Inst{20-16} = Rss32{4-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
-}
-class Enc_2771456 : OpcodeHexagon {
- bits <5> Ii;
- let Inst{12-8} = Ii{4-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
-}
-class Enc_11282123 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{12-7} = Ii{5-0};
- bits <8> II;
- let Inst{13-13} = II{7-7};
- let Inst{6-0} = II{6-0};
+class Enc_397f23 : OpcodeHexagon {
+ bits <8> Ii;
+ let Inst{13-13} = Ii{7-7};
+ let Inst{7-3} = Ii{6-2};
+ bits <2> Pv4;
+ let Inst{1-0} = Pv4{1-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
-}
-class Enc_518319 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{20-16} = Ii{5-1};
- let Inst{5-5} = Ii{0-0};
bits <5> Rt32;
let Inst{12-8} = Rt32{4-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
}
-class Enc_16104442 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Rtt32;
- let Inst{20-16} = Rtt32{4-0};
- bits <5> Vd32;
- let Inst{7-3} = Vd32{4-0};
-}
-class Enc_7912540 : OpcodeHexagon {
- bits <5> Rss32;
- let Inst{20-16} = Rss32{4-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
- bits <5> Rxx32;
- let Inst{4-0} = Rxx32{4-0};
-}
-class Enc_15560488 : OpcodeHexagon {
+class Enc_865390 : OpcodeHexagon {
bits <3> Ii;
let Inst{10-8} = Ii{2-0};
bits <2> Pv4;
let Inst{12-11} = Pv4{1-0};
- bits <5> Vd32;
- let Inst{4-0} = Vd32{4-0};
+ bits <5> Vs32;
+ let Inst{4-0} = Vs32{4-0};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_7581852 : OpcodeHexagon {
+class Enc_98c0b8 : OpcodeHexagon {
bits <2> Ii;
let Inst{13-13} = Ii{1-1};
let Inst{7-7} = Ii{0-0};
+ bits <2> Pv4;
+ let Inst{6-5} = Pv4{1-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
bits <5> Rt32;
@@ -1197,144 +941,139 @@ class Enc_7581852 : OpcodeHexagon {
bits <5> Rdd32;
let Inst{4-0} = Rdd32{4-0};
}
-class Enc_10030031 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <5> Vd32;
- let Inst{7-3} = Vd32{4-0};
-}
-class Enc_3915770 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{6-3} = Ii{3-0};
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
+class Enc_bfbf03 : OpcodeHexagon {
+ bits <2> Qs4;
+ let Inst{9-8} = Qs4{1-0};
+ bits <2> Qd4;
+ let Inst{1-0} = Qd4{1-0};
}
-class Enc_4075554 : OpcodeHexagon {
+class Enc_ecbcc8 : OpcodeHexagon {
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
+}
+class Enc_f5e933 : OpcodeHexagon {
+ bits <2> Ps4;
+ let Inst{17-16} = Ps4{1-0};
bits <5> Rd32;
let Inst{4-0} = Rd32{4-0};
}
-class Enc_11326438 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{6-3} = Ii{5-2};
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
- bits <3> Nt8;
- let Inst{10-8} = Nt8{2-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
+class Enc_3fc427 : OpcodeHexagon {
+ bits <5> Vu32;
+ let Inst{12-8} = Vu32{4-0};
+ bits <5> Vv32;
+ let Inst{20-16} = Vv32{4-0};
+ bits <5> Vxx32;
+ let Inst{4-0} = Vxx32{4-0};
}
-class Enc_4050532 : OpcodeHexagon {
- bits <16> Ii;
- let Inst{26-25} = Ii{15-14};
- let Inst{20-16} = Ii{13-9};
- let Inst{13-13} = Ii{8-8};
- let Inst{7-0} = Ii{7-0};
- bits <3> Nt8;
- let Inst{10-8} = Nt8{2-0};
+class Enc_01d3d0 : OpcodeHexagon {
+ bits <5> Vu32;
+ let Inst{12-8} = Vu32{4-0};
+ bits <5> Rt32;
+ let Inst{20-16} = Rt32{4-0};
+ bits <5> Vdd32;
+ let Inst{4-0} = Vdd32{4-0};
}
-class Enc_14461004 : OpcodeHexagon {
- bits <11> Ii;
- let Inst{26-25} = Ii{10-9};
+class Enc_b0e9d8 : OpcodeHexagon {
+ bits <10> Ii;
+ let Inst{21-21} = Ii{9-9};
let Inst{13-5} = Ii{8-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
-}
-class Enc_13344657 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{20-16} = Ii{5-1};
- let Inst{8-8} = Ii{0-0};
- bits <2> Pt4;
- let Inst{10-9} = Pt4{1-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
+ bits <5> Rx32;
+ let Inst{4-0} = Rx32{4-0};
}
-class Enc_13114546 : OpcodeHexagon {
- bits <2> Ii;
- let Inst{13-13} = Ii{1-1};
- let Inst{5-5} = Ii{0-0};
- bits <5> Rss32;
- let Inst{20-16} = Rss32{4-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
- bits <5> Rxx32;
- let Inst{4-0} = Rxx32{4-0};
+class Enc_3694bd : OpcodeHexagon {
+ bits <11> Ii;
+ let Inst{21-20} = Ii{10-9};
+ let Inst{7-1} = Ii{8-2};
+ bits <3> Ns8;
+ let Inst{18-16} = Ns8{2-0};
+ bits <5> n1;
+ let Inst{29-29} = n1{4-4};
+ let Inst{26-25} = n1{3-2};
+ let Inst{23-22} = n1{1-0};
}
-class Enc_14530015 : OpcodeHexagon {
+class Enc_a42857 : OpcodeHexagon {
bits <11> Ii;
let Inst{21-20} = Ii{10-9};
let Inst{7-1} = Ii{8-2};
bits <4> Rs16;
let Inst{19-16} = Rs16{3-0};
- bits <6> n1;
- let Inst{28-28} = n1{5-5};
- let Inst{25-23} = n1{4-2};
- let Inst{13-13} = n1{1-1};
+ bits <5> n1;
+ let Inst{28-28} = n1{4-4};
+ let Inst{24-22} = n1{3-1};
let Inst{8-8} = n1{0-0};
}
-class Enc_5967898 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{12-7} = Ii{5-0};
- bits <6> II;
- let Inst{13-13} = II{5-5};
- let Inst{4-0} = II{4-0};
+class Enc_b7fad3 : OpcodeHexagon {
bits <2> Pv4;
- let Inst{6-5} = Pv4{1-0};
+ let Inst{9-8} = Pv4{1-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
+}
+class Enc_223005 : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{6-3} = Ii{5-2};
+ bits <3> Nt8;
+ let Inst{10-8} = Nt8{2-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
}
-class Enc_15450971 : OpcodeHexagon {
+class Enc_9e4c3f : OpcodeHexagon {
+ bits <6> II;
+ let Inst{13-8} = II{5-0};
bits <11> Ii;
let Inst{21-20} = Ii{10-9};
let Inst{7-1} = Ii{8-2};
- bits <4> Rs16;
- let Inst{19-16} = Rs16{3-0};
- bits <6> n1;
- let Inst{28-28} = n1{5-5};
- let Inst{25-22} = n1{4-1};
- let Inst{13-13} = n1{0-0};
+ bits <4> Rd16;
+ let Inst{19-16} = Rd16{3-0};
}
-class Enc_15536400 : OpcodeHexagon {
+class Enc_8b8d61 : OpcodeHexagon {
bits <6> Ii;
- let Inst{3-0} = Ii{5-2};
- bits <4> Rs16;
- let Inst{7-4} = Rs16{3-0};
+ let Inst{22-21} = Ii{5-4};
+ let Inst{13-13} = Ii{3-3};
+ let Inst{7-5} = Ii{2-0};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+ bits <5> Ru32;
+ let Inst{4-0} = Ru32{4-0};
+ bits <5> Rd32;
+ let Inst{12-8} = Rd32{4-0};
}
-class Enc_1291652 : OpcodeHexagon {
- bits <1> Ii;
- let Inst{8-8} = Ii{0-0};
+class Enc_88c16c : OpcodeHexagon {
+ bits <5> Rss32;
+ let Inst{20-16} = Rss32{4-0};
+ bits <5> Rtt32;
+ let Inst{12-8} = Rtt32{4-0};
+ bits <5> Rxx32;
+ let Inst{4-0} = Rxx32{4-0};
}
-class Enc_5636753 : OpcodeHexagon {
+class Enc_770858 : OpcodeHexagon {
+ bits <2> Ps4;
+ let Inst{6-5} = Ps4{1-0};
bits <5> Vu32;
- let Inst{20-16} = Vu32{4-0};
+ let Inst{12-8} = Vu32{4-0};
+ bits <5> Vd32;
+ let Inst{4-0} = Vd32{4-0};
}
-class Enc_5757366 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{13-13} = Ii{3-3};
- let Inst{10-8} = Ii{2-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <5> Vs32;
- let Inst{4-0} = Vs32{4-0};
+class Enc_bd811a : OpcodeHexagon {
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+ bits <5> Cd32;
+ let Inst{4-0} = Cd32{4-0};
}
-class Enc_9752128 : OpcodeHexagon {
+class Enc_b05839 : OpcodeHexagon {
bits <7> Ii;
let Inst{8-5} = Ii{6-3};
+ bits <1> Mu2;
+ let Inst{13-13} = Mu2{0-0};
bits <5> Rdd32;
let Inst{4-0} = Rdd32{4-0};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_13618890 : OpcodeHexagon {
+class Enc_bc03e5 : OpcodeHexagon {
bits <17> Ii;
let Inst{26-25} = Ii{16-15};
let Inst{20-16} = Ii{14-10};
@@ -1343,33 +1082,7 @@ class Enc_13618890 : OpcodeHexagon {
bits <3> Nt8;
let Inst{10-8} = Nt8{2-0};
}
-class Enc_5890213 : OpcodeHexagon {
- bits <5> Vuu32;
- let Inst{12-8} = Vuu32{4-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <5> Vx32;
- let Inst{4-0} = Vx32{4-0};
-}
-class Enc_5582416 : OpcodeHexagon {
- bits <2> Ii;
- let Inst{13-13} = Ii{1-1};
- let Inst{7-7} = Ii{0-0};
- bits <6> II;
- let Inst{11-8} = II{5-2};
- let Inst{6-5} = II{1-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
-}
-class Enc_13536408 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{3-0} = Ii{3-0};
- bits <4> Rs16;
- let Inst{7-4} = Rs16{3-0};
-}
-class Enc_9773189 : OpcodeHexagon {
+class Enc_412ff0 : OpcodeHexagon {
bits <5> Rss32;
let Inst{20-16} = Rss32{4-0};
bits <5> Ru32;
@@ -1377,420 +1090,547 @@ class Enc_9773189 : OpcodeHexagon {
bits <5> Rxx32;
let Inst{12-8} = Rxx32{4-0};
}
-class Enc_2152247 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{13-13} = Ii{3-3};
- let Inst{10-8} = Ii{2-0};
+class Enc_c9a18e : OpcodeHexagon {
+ bits <11> Ii;
+ let Inst{21-20} = Ii{10-9};
+ let Inst{7-1} = Ii{8-2};
+ bits <3> Ns8;
+ let Inst{18-16} = Ns8{2-0};
bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <3> Os8;
- let Inst{2-0} = Os8{2-0};
+ let Inst{12-8} = Rt32{4-0};
+}
+class Enc_be32a5 : OpcodeHexagon {
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+ bits <5> Rt32;
+ let Inst{12-8} = Rt32{4-0};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
+}
+class Enc_e6abcf : OpcodeHexagon {
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+ bits <5> Rtt32;
+ let Inst{12-8} = Rtt32{4-0};
}
-class Enc_12848507 : OpcodeHexagon {
+class Enc_6339d5 : OpcodeHexagon {
bits <2> Ii;
let Inst{13-13} = Ii{1-1};
- let Inst{6-6} = Ii{0-0};
- bits <6> II;
- let Inst{5-0} = II{5-0};
+ let Inst{7-7} = Ii{0-0};
+ bits <2> Pv4;
+ let Inst{6-5} = Pv4{1-0};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
bits <5> Ru32;
- let Inst{20-16} = Ru32{4-0};
- bits <5> Rtt32;
- let Inst{12-8} = Rtt32{4-0};
+ let Inst{12-8} = Ru32{4-0};
+ bits <5> Rt32;
+ let Inst{4-0} = Rt32{4-0};
}
-class Enc_16279406 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{13-13} = Ii{3-3};
- let Inst{10-8} = Ii{2-0};
- bits <2> Qv4;
- let Inst{12-11} = Qv4{1-0};
+class Enc_d6990d : OpcodeHexagon {
+ bits <5> Vuu32;
+ let Inst{12-8} = Vuu32{4-0};
bits <5> Rt32;
let Inst{20-16} = Rt32{4-0};
- bits <5> Vs32;
- let Inst{4-0} = Vs32{4-0};
+ bits <5> Vxx32;
+ let Inst{4-0} = Vxx32{4-0};
}
-class Enc_1734121 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{10-8} = Ii{3-1};
- bits <4> Rs16;
- let Inst{7-4} = Rs16{3-0};
- bits <4> Rt16;
- let Inst{3-0} = Rt16{3-0};
+class Enc_6c9440 : OpcodeHexagon {
+ bits <10> Ii;
+ let Inst{21-21} = Ii{9-9};
+ let Inst{13-5} = Ii{8-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
}
-class Enc_766909 : OpcodeHexagon {
- bits <5> Rtt32;
- let Inst{12-8} = Rtt32{4-0};
+class Enc_0d8adb : OpcodeHexagon {
+ bits <8> Ii;
+ let Inst{12-5} = Ii{7-0};
bits <5> Rss32;
let Inst{20-16} = Rss32{4-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
- bits <2> Pe4;
- let Inst{6-5} = Pe4{1-0};
-}
-class Enc_4527648 : OpcodeHexagon {
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
bits <2> Pd4;
let Inst{1-0} = Pd4{1-0};
}
-class Enc_8849208 : OpcodeHexagon {
- bits <7> Ii;
- let Inst{12-7} = Ii{6-1};
+class Enc_50e578 : OpcodeHexagon {
+ bits <5> Vu32;
+ let Inst{12-8} = Vu32{4-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
+}
+class Enc_1cf4ca : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{17-16} = Ii{5-4};
+ let Inst{6-3} = Ii{3-0};
+ bits <2> Pv4;
+ let Inst{1-0} = Pv4{1-0};
bits <5> Rt32;
- let Inst{4-0} = Rt32{4-0};
+ let Inst{12-8} = Rt32{4-0};
+}
+class Enc_48b75f : OpcodeHexagon {
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+ bits <2> Pd4;
+ let Inst{1-0} = Pd4{1-0};
}
-class Enc_9894557 : OpcodeHexagon {
+class Enc_b97f71 : OpcodeHexagon {
bits <6> Ii;
- let Inst{13-8} = Ii{5-0};
- bits <6> II;
- let Inst{23-21} = II{5-3};
- let Inst{7-5} = II{2-0};
- bits <5> Rss32;
- let Inst{20-16} = Rss32{4-0};
+ let Inst{8-5} = Ii{5-2};
+ bits <2> Pt4;
+ let Inst{10-9} = Pt4{1-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
+}
+class Enc_9d1247 : OpcodeHexagon {
+ bits <7> Ii;
+ let Inst{8-5} = Ii{6-3};
+ bits <2> Pt4;
+ let Inst{10-9} = Pt4{1-0};
bits <5> Rdd32;
let Inst{4-0} = Rdd32{4-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
}
-class Enc_4109168 : OpcodeHexagon {
- bits <2> Qv4;
- let Inst{23-22} = Qv4{1-0};
+class Enc_f4413a : OpcodeHexagon {
+ bits <4> Ii;
+ let Inst{8-5} = Ii{3-0};
+ bits <2> Pt4;
+ let Inst{10-9} = Pt4{1-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
}
-class Enc_14560494 : OpcodeHexagon {
- bits <3> Ii;
+class Enc_f7430e : OpcodeHexagon {
+ bits <4> Ii;
+ let Inst{13-13} = Ii{3-3};
let Inst{10-8} = Ii{2-0};
bits <2> Pv4;
let Inst{12-11} = Pv4{1-0};
+ bits <5> Rt32;
+ let Inst{20-16} = Rt32{4-0};
+ bits <3> Os8;
+ let Inst{2-0} = Os8{2-0};
+}
+class Enc_e7581c : OpcodeHexagon {
+ bits <5> Vu32;
+ let Inst{12-8} = Vu32{4-0};
bits <5> Vd32;
let Inst{4-0} = Vd32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
}
-class Enc_9773167 : OpcodeHexagon {
- bits <7> Ii;
- let Inst{12-7} = Ii{6-1};
- bits <5> II;
- let Inst{4-0} = II{4-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
+class Enc_2301d6 : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{20-16} = Ii{5-1};
+ let Inst{8-8} = Ii{0-0};
+ bits <2> Pt4;
+ let Inst{10-9} = Pt4{1-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
}
-class Enc_1898420 : OpcodeHexagon {
- bits <11> Ii;
- let Inst{21-20} = Ii{10-9};
- let Inst{7-1} = Ii{8-2};
- bits <3> Ns8;
- let Inst{18-16} = Ns8{2-0};
+class Enc_c31910 : OpcodeHexagon {
+ bits <8> Ii;
+ let Inst{23-21} = Ii{7-5};
+ let Inst{13-13} = Ii{4-4};
+ let Inst{7-5} = Ii{3-1};
+ let Inst{3-3} = Ii{0-0};
+ bits <5> II;
+ let Inst{12-8} = II{4-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
}
-class Enc_11498120 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
+class Enc_2f2f04 : OpcodeHexagon {
+ bits <1> Ii;
+ let Inst{5-5} = Ii{0-0};
+ bits <5> Vuu32;
+ let Inst{12-8} = Vuu32{4-0};
bits <5> Rt32;
let Inst{20-16} = Rt32{4-0};
- bits <2> Qd4;
- let Inst{1-0} = Qd4{1-0};
+ bits <5> Vdd32;
+ let Inst{4-0} = Vdd32{4-0};
}
-class Enc_15459921 : OpcodeHexagon {
- bits <3> Ii;
+class Enc_8d8a30 : OpcodeHexagon {
+ bits <4> Ii;
+ let Inst{13-13} = Ii{3-3};
let Inst{10-8} = Ii{2-0};
bits <2> Pv4;
let Inst{12-11} = Pv4{1-0};
- bits <5> Vs32;
- let Inst{4-0} = Vs32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_10058269 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
bits <5> Rt32;
let Inst{20-16} = Rt32{4-0};
- bits <5> Vx32;
- let Inst{4-0} = Vx32{4-0};
-}
-class Enc_10197700 : OpcodeHexagon {
- bits <5> Vuu32;
- let Inst{20-16} = Vuu32{4-0};
- bits <5> Vvv32;
- let Inst{12-8} = Vvv32{4-0};
- bits <3> Rt8;
- let Inst{2-0} = Rt8{2-0};
- bits <5> Vdd32;
- let Inst{7-3} = Vdd32{4-0};
-}
-class Enc_12608570 : OpcodeHexagon {
- bits <17> Ii;
- let Inst{26-25} = Ii{16-15};
- let Inst{20-16} = Ii{14-10};
- let Inst{13-5} = Ii{9-1};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
-}
-class Enc_4804090 : OpcodeHexagon {
- bits <6> Ss64;
- let Inst{21-16} = Ss64{5-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
-}
-class Enc_14973146 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{20-16} = Vu32{4-0};
- bits <5> Vv32;
- let Inst{12-8} = Vv32{4-0};
- bits <3> Qd8;
- let Inst{5-3} = Qd8{2-0};
+ bits <5> Vd32;
+ let Inst{4-0} = Vd32{4-0};
}
-class Enc_5718302 : OpcodeHexagon {
+class Enc_2d7491 : OpcodeHexagon {
+ bits <13> Ii;
+ let Inst{26-25} = Ii{12-11};
+ let Inst{13-5} = Ii{10-2};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
- bits <2> Pe4;
- let Inst{6-5} = Pe4{1-0};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
}
-class Enc_2103742 : OpcodeHexagon {
- bits <5> Ii;
- let Inst{12-8} = Ii{4-0};
+class Enc_a803e0 : OpcodeHexagon {
+ bits <7> Ii;
+ let Inst{12-7} = Ii{6-1};
+ bits <8> II;
+ let Inst{13-13} = II{7-7};
+ let Inst{6-0} = II{6-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <2> Pd4;
- let Inst{1-0} = Pd4{1-0};
}
-class Enc_7564330 : OpcodeHexagon {
+class Enc_45364e : OpcodeHexagon {
bits <5> Vu32;
- let Inst{20-16} = Vu32{4-0};
+ let Inst{12-8} = Vu32{4-0};
bits <5> Vv32;
- let Inst{12-8} = Vv32{4-0};
- bits <3> Rt8;
- let Inst{2-0} = Rt8{2-0};
+ let Inst{20-16} = Vv32{4-0};
bits <5> Vd32;
- let Inst{7-3} = Vd32{4-0};
+ let Inst{4-0} = Vd32{4-0};
}
-class Enc_2176383 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{9-4} = Ii{5-0};
- bits <4> Rd16;
- let Inst{3-0} = Rd16{3-0};
+class Enc_b909d2 : OpcodeHexagon {
+ bits <11> Ii;
+ let Inst{21-20} = Ii{10-9};
+ let Inst{7-1} = Ii{8-2};
+ bits <4> Rs16;
+ let Inst{19-16} = Rs16{3-0};
+ bits <7> n1;
+ let Inst{28-28} = n1{6-6};
+ let Inst{25-22} = n1{5-2};
+ let Inst{13-13} = n1{1-1};
+ let Inst{8-8} = n1{0-0};
}
-class Enc_7736768 : OpcodeHexagon {
+class Enc_e6c957 : OpcodeHexagon {
+ bits <10> Ii;
+ let Inst{21-21} = Ii{9-9};
+ let Inst{13-5} = Ii{8-0};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
+}
+class Enc_fa3ba4 : OpcodeHexagon {
+ bits <14> Ii;
+ let Inst{26-25} = Ii{13-12};
+ let Inst{13-5} = Ii{11-3};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
+}
+class Enc_0d8870 : OpcodeHexagon {
bits <12> Ii;
let Inst{26-25} = Ii{11-10};
let Inst{13-13} = Ii{9-9};
let Inst{7-0} = Ii{8-1};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
+ bits <3> Nt8;
+ let Inst{10-8} = Nt8{2-0};
}
-class Enc_13189194 : OpcodeHexagon {
- bits <1> Ii;
- let Inst{5-5} = Ii{0-0};
- bits <5> Vuu32;
- let Inst{12-8} = Vuu32{4-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <5> Vxx32;
- let Inst{4-0} = Vxx32{4-0};
+class Enc_9fae8a : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{13-8} = Ii{5-0};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
}
-class Enc_5154851 : OpcodeHexagon {
- bits <5> Rtt32;
- let Inst{20-16} = Rtt32{4-0};
- bits <5> Vdd32;
- let Inst{7-3} = Vdd32{4-0};
+class Enc_18c338 : OpcodeHexagon {
+ bits <8> Ii;
+ let Inst{12-5} = Ii{7-0};
+ bits <8> II;
+ let Inst{22-16} = II{7-1};
+ let Inst{13-13} = II{0-0};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
+}
+class Enc_5ccba9 : OpcodeHexagon {
+ bits <8> Ii;
+ let Inst{12-7} = Ii{7-2};
+ bits <6> II;
+ let Inst{13-13} = II{5-5};
+ let Inst{4-0} = II{4-0};
+ bits <2> Pv4;
+ let Inst{6-5} = Pv4{1-0};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
}
-class Enc_1329520 : OpcodeHexagon {
+class Enc_0ed752 : OpcodeHexagon {
bits <5> Rss32;
let Inst{20-16} = Rss32{4-0};
bits <5> Cdd32;
let Inst{4-0} = Cdd32{4-0};
}
-class Enc_14057553 : OpcodeHexagon {
- bits <16> Ii;
- let Inst{21-21} = Ii{15-15};
- let Inst{13-8} = Ii{14-9};
- let Inst{2-0} = Ii{8-6};
- bits <5> Vd32;
- let Inst{7-3} = Vd32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_9223889 : OpcodeHexagon {
+class Enc_143445 : OpcodeHexagon {
+ bits <13> Ii;
+ let Inst{26-25} = Ii{12-11};
+ let Inst{13-13} = Ii{10-10};
+ let Inst{7-0} = Ii{9-2};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
bits <5> Rt32;
let Inst{12-8} = Rt32{4-0};
+}
+class Enc_3a3d62 : OpcodeHexagon {
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
+}
+class Enc_3e3989 : OpcodeHexagon {
+ bits <11> Ii;
+ let Inst{21-20} = Ii{10-9};
+ let Inst{7-1} = Ii{8-2};
+ bits <4> Rs16;
+ let Inst{19-16} = Rs16{3-0};
+ bits <6> n1;
+ let Inst{28-28} = n1{5-5};
+ let Inst{25-22} = n1{4-1};
+ let Inst{8-8} = n1{0-0};
+}
+class Enc_152467 : OpcodeHexagon {
+ bits <5> Ii;
+ let Inst{8-5} = Ii{4-1};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
bits <5> Rx32;
- let Inst{4-0} = Rx32{4-0};
+ let Inst{20-16} = Rx32{4-0};
+}
+class Enc_daea09 : OpcodeHexagon {
+ bits <17> Ii;
+ let Inst{23-22} = Ii{16-15};
+ let Inst{20-16} = Ii{14-10};
+ let Inst{13-13} = Ii{9-9};
+ let Inst{7-1} = Ii{8-2};
+ bits <2> Pu4;
+ let Inst{9-8} = Pu4{1-0};
}
-class Enc_10979813 : OpcodeHexagon {
+class Enc_f37377 : OpcodeHexagon {
+ bits <8> Ii;
+ let Inst{12-7} = Ii{7-2};
+ bits <8> II;
+ let Inst{13-13} = II{7-7};
+ let Inst{6-0} = II{6-0};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+}
+class Enc_a198f6 : OpcodeHexagon {
bits <7> Ii;
- let Inst{13-13} = Ii{6-6};
- let Inst{7-3} = Ii{5-1};
- bits <2> Pv4;
- let Inst{1-0} = Pv4{1-0};
+ let Inst{10-5} = Ii{6-1};
+ bits <2> Pt4;
+ let Inst{12-11} = Pt4{1-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
}
-class Enc_13490067 : OpcodeHexagon {
- bits <3> Qt8;
- let Inst{2-0} = Qt8{2-0};
+class Enc_3dac0b : OpcodeHexagon {
+ bits <2> Qt4;
+ let Inst{6-5} = Qt4{1-0};
bits <5> Vu32;
- let Inst{20-16} = Vu32{4-0};
+ let Inst{12-8} = Vu32{4-0};
bits <5> Vv32;
- let Inst{12-8} = Vv32{4-0};
- bits <5> Vd32;
- let Inst{7-3} = Vd32{4-0};
-}
-class Enc_10076500 : OpcodeHexagon {
- bits <2> Ii;
- let Inst{13-13} = Ii{1-1};
- let Inst{6-6} = Ii{0-0};
- bits <6> II;
- let Inst{5-0} = II{5-0};
- bits <5> Ru32;
- let Inst{20-16} = Ru32{4-0};
- bits <3> Nt8;
- let Inst{10-8} = Nt8{2-0};
+ let Inst{20-16} = Vv32{4-0};
+ bits <5> Vdd32;
+ let Inst{4-0} = Vdd32{4-0};
}
-class Enc_163381 : OpcodeHexagon {
- bits <14> Ii;
- let Inst{26-25} = Ii{13-12};
- let Inst{13-5} = Ii{11-3};
+class Enc_e38e1f : OpcodeHexagon {
+ bits <8> Ii;
+ let Inst{12-5} = Ii{7-0};
+ bits <2> Pu4;
+ let Inst{22-21} = Pu4{1-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
}
-class Enc_10328975 : OpcodeHexagon {
- bits <2> Pt4;
- let Inst{9-8} = Pt4{1-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
+class Enc_f8ecf9 : OpcodeHexagon {
+ bits <5> Vuu32;
+ let Inst{12-8} = Vuu32{4-0};
+ bits <5> Vvv32;
+ let Inst{20-16} = Vvv32{4-0};
+ bits <5> Vdd32;
+ let Inst{4-0} = Vdd32{4-0};
}
-class Enc_14939491 : OpcodeHexagon {
- bits <4> Rs16;
- let Inst{7-4} = Rs16{3-0};
+class Enc_7f1a05 : OpcodeHexagon {
+ bits <5> Ru32;
+ let Inst{4-0} = Ru32{4-0};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+ bits <5> Ry32;
+ let Inst{12-8} = Ry32{4-0};
+}
+class Enc_2df31d : OpcodeHexagon {
+ bits <8> Ii;
+ let Inst{9-4} = Ii{7-2};
bits <4> Rd16;
let Inst{3-0} = Rd16{3-0};
}
-class Enc_8891794 : OpcodeHexagon {
- bits <2> Pt4;
- let Inst{9-8} = Pt4{1-0};
- bits <2> Ps4;
- let Inst{17-16} = Ps4{1-0};
- bits <2> Pd4;
- let Inst{1-0} = Pd4{1-0};
-}
-class Enc_7723767 : OpcodeHexagon {
- bits <5> Vuu32;
- let Inst{12-8} = Vuu32{4-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <5> Vd32;
- let Inst{7-3} = Vd32{4-0};
+class Enc_25bef0 : OpcodeHexagon {
+ bits <16> Ii;
+ let Inst{26-25} = Ii{15-14};
+ let Inst{20-16} = Ii{13-9};
+ let Inst{13-5} = Ii{8-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
}
-class Enc_2639299 : OpcodeHexagon {
+class Enc_f82302 : OpcodeHexagon {
bits <11> Ii;
let Inst{21-20} = Ii{10-9};
let Inst{7-1} = Ii{8-2};
- bits <4> Rs16;
- let Inst{19-16} = Rs16{3-0};
- bits <4> Rd16;
- let Inst{11-8} = Rd16{3-0};
+ bits <3> Ns8;
+ let Inst{18-16} = Ns8{2-0};
+ bits <4> n1;
+ let Inst{29-29} = n1{3-3};
+ let Inst{26-25} = n1{2-1};
+ let Inst{23-23} = n1{0-0};
}
-class Enc_11552785 : OpcodeHexagon {
- bits <5> Rtt32;
- let Inst{12-8} = Rtt32{4-0};
- bits <5> Rss32;
- let Inst{20-16} = Rss32{4-0};
- bits <2> Pu4;
- let Inst{6-5} = Pu4{1-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
+class Enc_83ee64 : OpcodeHexagon {
+ bits <5> Ii;
+ let Inst{12-8} = Ii{4-0};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+ bits <2> Pd4;
+ let Inst{1-0} = Pd4{1-0};
}
-class Enc_11849200 : OpcodeHexagon {
+class Enc_adf111 : OpcodeHexagon {
+ bits <5> Vu32;
+ let Inst{12-8} = Vu32{4-0};
+ bits <5> Rt32;
+ let Inst{20-16} = Rt32{4-0};
+ bits <2> Qx4;
+ let Inst{1-0} = Qx4{1-0};
+}
+class Enc_46c951 : OpcodeHexagon {
bits <6> Ii;
let Inst{12-7} = Ii{5-0};
+ bits <5> II;
+ let Inst{4-0} = II{4-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Rt32;
- let Inst{4-0} = Rt32{4-0};
}
-class Enc_14868535 : OpcodeHexagon {
- bits <17> Ii;
- let Inst{23-22} = Ii{16-15};
- let Inst{20-16} = Ii{14-10};
- let Inst{13-13} = Ii{9-9};
- let Inst{7-1} = Ii{8-2};
- bits <2> Pu4;
- let Inst{9-8} = Pu4{1-0};
+class Enc_5d6c34 : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{13-8} = Ii{5-0};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+ bits <2> Pd4;
+ let Inst{1-0} = Pd4{1-0};
+}
+class Enc_4df4e9 : OpcodeHexagon {
+ bits <11> Ii;
+ let Inst{26-25} = Ii{10-9};
+ let Inst{13-13} = Ii{8-8};
+ let Inst{7-0} = Ii{7-0};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+ bits <3> Nt8;
+ let Inst{10-8} = Nt8{2-0};
}
-class Enc_48594 : OpcodeHexagon {
+class Enc_91b9fe : OpcodeHexagon {
+ bits <5> Ii;
+ let Inst{6-3} = Ii{4-1};
bits <1> Mu2;
let Inst{13-13} = Mu2{0-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
+ bits <3> Nt8;
+ let Inst{10-8} = Nt8{2-0};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_6608821 : OpcodeHexagon {
- bits <4> Ii;
+class Enc_a7b8e8 : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{22-21} = Ii{5-4};
let Inst{13-13} = Ii{3-3};
- let Inst{10-8} = Ii{2-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <3> Os8;
- let Inst{2-0} = Os8{2-0};
-}
-class Enc_11049656 : OpcodeHexagon {
- bits <9> Ii;
- let Inst{13-13} = Ii{8-8};
- let Inst{7-3} = Ii{7-3};
- bits <2> Pv4;
- let Inst{1-0} = Pv4{1-0};
+ let Inst{7-5} = Ii{2-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
+ bits <5> Rt32;
+ let Inst{12-8} = Rt32{4-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
+}
+class Enc_2b3f60 : OpcodeHexagon {
+ bits <5> Rss32;
+ let Inst{20-16} = Rss32{4-0};
bits <5> Rtt32;
let Inst{12-8} = Rtt32{4-0};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
+ bits <2> Px4;
+ let Inst{6-5} = Px4{1-0};
}
-class Enc_117962 : OpcodeHexagon {
- bits <8> Ii;
- let Inst{23-21} = Ii{7-5};
- let Inst{13-13} = Ii{4-4};
- let Inst{7-5} = Ii{3-1};
- let Inst{3-3} = Ii{0-0};
- bits <5> II;
- let Inst{12-8} = II{4-0};
+class Enc_bd1cbc : OpcodeHexagon {
+ bits <5> Ii;
+ let Inst{8-5} = Ii{4-1};
+ bits <5> Ryy32;
+ let Inst{4-0} = Ryy32{4-0};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_5900401 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{6-3} = Ii{3-0};
- bits <3> Nt8;
- let Inst{10-8} = Nt8{2-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
+class Enc_a30110 : OpcodeHexagon {
+ bits <5> Vu32;
+ let Inst{12-8} = Vu32{4-0};
+ bits <5> Vv32;
+ let Inst{23-19} = Vv32{4-0};
+ bits <3> Rt8;
+ let Inst{18-16} = Rt8{2-0};
+ bits <5> Vd32;
+ let Inst{4-0} = Vd32{4-0};
}
-class Enc_36641 : OpcodeHexagon {
- bits <5> Vuu32;
- let Inst{12-8} = Vuu32{4-0};
+class Enc_f3f408 : OpcodeHexagon {
+ bits <4> Ii;
+ let Inst{13-13} = Ii{3-3};
+ let Inst{10-8} = Ii{2-0};
bits <5> Rt32;
let Inst{20-16} = Rt32{4-0};
bits <5> Vd32;
let Inst{4-0} = Vd32{4-0};
}
-class Enc_9626139 : OpcodeHexagon {
- bits <2> Pu4;
- let Inst{6-5} = Pu4{1-0};
+class Enc_690862 : OpcodeHexagon {
+ bits <13> Ii;
+ let Inst{26-25} = Ii{12-11};
+ let Inst{13-13} = Ii{10-10};
+ let Inst{7-0} = Ii{9-2};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+ bits <3> Nt8;
+ let Inst{10-8} = Nt8{2-0};
+}
+class Enc_2a3787 : OpcodeHexagon {
+ bits <13> Ii;
+ let Inst{26-25} = Ii{12-11};
+ let Inst{13-5} = Ii{10-2};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
bits <5> Rd32;
let Inst{4-0} = Rd32{4-0};
}
-class Enc_11971407 : OpcodeHexagon {
+class Enc_d5c73f : OpcodeHexagon {
+ bits <1> Mu2;
+ let Inst{13-13} = Mu2{0-0};
+ bits <5> Rt32;
+ let Inst{12-8} = Rt32{4-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
+}
+class Enc_3f97c8 : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{6-3} = Ii{5-2};
+ bits <1> Mu2;
+ let Inst{13-13} = Mu2{0-0};
+ bits <3> Nt8;
+ let Inst{10-8} = Nt8{2-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
+}
+class Enc_d50cd3 : OpcodeHexagon {
+ bits <3> Ii;
+ let Inst{7-5} = Ii{2-0};
+ bits <5> Rss32;
+ let Inst{20-16} = Rss32{4-0};
+ bits <5> Rtt32;
+ let Inst{12-8} = Rtt32{4-0};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
+}
+class Enc_729ff7 : OpcodeHexagon {
bits <3> Ii;
let Inst{7-5} = Ii{2-0};
bits <5> Rtt32;
@@ -1800,37 +1640,32 @@ class Enc_11971407 : OpcodeHexagon {
bits <5> Rdd32;
let Inst{4-0} = Rdd32{4-0};
}
-class Enc_9852473 : OpcodeHexagon {
- bits <13> Ii;
- let Inst{26-25} = Ii{12-11};
- let Inst{13-5} = Ii{10-2};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
+class Enc_217147 : OpcodeHexagon {
+ bits <2> Qv4;
+ let Inst{23-22} = Qv4{1-0};
+}
+class Enc_b9c5fb : OpcodeHexagon {
+ bits <5> Rss32;
+ let Inst{20-16} = Rss32{4-0};
bits <5> Rdd32;
let Inst{4-0} = Rdd32{4-0};
}
-class Enc_6495334 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{22-21} = Ii{5-4};
- let Inst{13-13} = Ii{3-3};
- let Inst{7-5} = Ii{2-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <5> Ru32;
- let Inst{4-0} = Ru32{4-0};
- bits <5> Rd32;
- let Inst{12-8} = Rd32{4-0};
+class Enc_f394d3 : OpcodeHexagon {
+ bits <6> II;
+ let Inst{11-8} = II{5-2};
+ let Inst{6-5} = II{1-0};
+ bits <5> Ryy32;
+ let Inst{4-0} = Ryy32{4-0};
+ bits <5> Re32;
+ let Inst{20-16} = Re32{4-0};
}
-class Enc_1186018 : OpcodeHexagon {
- bits <17> Ii;
- let Inst{26-25} = Ii{16-15};
- let Inst{20-16} = Ii{14-10};
- let Inst{13-13} = Ii{9-9};
- let Inst{7-0} = Ii{8-1};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
+class Enc_0cb018 : OpcodeHexagon {
+ bits <5> Cs32;
+ let Inst{20-16} = Cs32{4-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
}
-class Enc_15999208 : OpcodeHexagon {
+class Enc_541f26 : OpcodeHexagon {
bits <18> Ii;
let Inst{26-25} = Ii{17-16};
let Inst{20-16} = Ii{15-11};
@@ -1839,446 +1674,302 @@ class Enc_15999208 : OpcodeHexagon {
bits <5> Rt32;
let Inst{12-8} = Rt32{4-0};
}
-class Enc_11477246 : OpcodeHexagon {
+class Enc_724154 : OpcodeHexagon {
bits <6> II;
let Inst{5-0} = II{5-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
+ bits <3> Nt8;
+ let Inst{10-8} = Nt8{2-0};
bits <5> Re32;
let Inst{20-16} = Re32{4-0};
}
-class Enc_7971062 : OpcodeHexagon {
- bits <16> Ii;
- let Inst{23-22} = Ii{15-14};
- let Inst{20-16} = Ii{13-9};
- let Inst{13-5} = Ii{8-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
-}
-class Enc_4327792 : OpcodeHexagon {
- bits <5> Vuu32;
- let Inst{12-8} = Vuu32{4-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <5> Vxx32;
- let Inst{4-0} = Vxx32{4-0};
+class Enc_179b35 : OpcodeHexagon {
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+ bits <5> Rtt32;
+ let Inst{12-8} = Rtt32{4-0};
+ bits <5> Rx32;
+ let Inst{4-0} = Rx32{4-0};
}
-class Enc_10326434 : OpcodeHexagon {
- bits <5> Ii;
- let Inst{6-3} = Ii{4-1};
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
+class Enc_585242 : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{13-13} = Ii{5-5};
+ let Inst{7-3} = Ii{4-0};
+ bits <2> Pv4;
+ let Inst{1-0} = Pv4{1-0};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
bits <3> Nt8;
let Inst{10-8} = Nt8{2-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_1572239 : OpcodeHexagon {
- bits <2> Qt4;
- let Inst{6-5} = Qt4{1-0};
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Vv32;
- let Inst{20-16} = Vv32{4-0};
- bits <5> Vd32;
- let Inst{4-0} = Vd32{4-0};
}
-class Enc_6372758 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{8-5} = Ii{3-0};
- bits <5> Ryy32;
- let Inst{4-0} = Ryy32{4-0};
+class Enc_cf1927 : OpcodeHexagon {
+ bits <1> Mu2;
+ let Inst{13-13} = Mu2{0-0};
+ bits <3> Os8;
+ let Inst{2-0} = Os8{2-0};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_15793331 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{20-16} = Vu32{4-0};
- bits <5> Vv32;
- let Inst{12-8} = Vv32{4-0};
- bits <5> Vx32;
- let Inst{7-3} = Vx32{4-0};
+class Enc_b84c4c : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{13-8} = Ii{5-0};
+ bits <6> II;
+ let Inst{23-21} = II{5-3};
+ let Inst{7-5} = II{2-0};
+ bits <5> Rss32;
+ let Inst{20-16} = Rss32{4-0};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
}
-class Enc_11424254 : OpcodeHexagon {
- bits <2> Qt4;
- let Inst{6-5} = Qt4{1-0};
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Vv32;
- let Inst{20-16} = Vv32{4-0};
- bits <5> Vdd32;
- let Inst{4-0} = Vdd32{4-0};
+class Enc_9ac432 : OpcodeHexagon {
+ bits <2> Ps4;
+ let Inst{17-16} = Ps4{1-0};
+ bits <2> Pt4;
+ let Inst{9-8} = Pt4{1-0};
+ bits <2> Pu4;
+ let Inst{7-6} = Pu4{1-0};
+ bits <2> Pd4;
+ let Inst{1-0} = Pd4{1-0};
}
-class Enc_4983213 : OpcodeHexagon {
- bits <14> Ii;
- let Inst{10-0} = Ii{13-3};
+class Enc_8203bb : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{12-7} = Ii{5-0};
+ bits <8> II;
+ let Inst{13-13} = II{7-7};
+ let Inst{6-0} = II{6-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
}
-class Enc_16035138 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
-}
-class Enc_8225953 : OpcodeHexagon {
- bits <8> Ii;
- let Inst{13-13} = Ii{7-7};
- let Inst{7-3} = Ii{6-2};
- bits <2> Pv4;
- let Inst{1-0} = Pv4{1-0};
+class Enc_e66a97 : OpcodeHexagon {
+ bits <7> Ii;
+ let Inst{12-7} = Ii{6-1};
+ bits <5> II;
+ let Inst{4-0} = II{4-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
-}
-class Enc_4397470 : OpcodeHexagon {
- bits <5> II;
- let Inst{12-8} = II{4-0};
- bits <11> Ii;
- let Inst{21-20} = Ii{10-9};
- let Inst{7-1} = Ii{8-2};
- bits <3> Ns8;
- let Inst{18-16} = Ns8{2-0};
}
-class Enc_1004392 : OpcodeHexagon {
+class Enc_8c2412 : OpcodeHexagon {
+ bits <2> Ps4;
+ let Inst{6-5} = Ps4{1-0};
bits <5> Vu32;
- let Inst{20-16} = Vu32{4-0};
+ let Inst{12-8} = Vu32{4-0};
bits <5> Vv32;
- let Inst{12-8} = Vv32{4-0};
- bits <5> Vxx32;
- let Inst{7-3} = Vxx32{4-0};
-}
-class Enc_16319737 : OpcodeHexagon {
- bits <14> Ii;
- let Inst{26-25} = Ii{13-12};
- let Inst{13-13} = Ii{11-11};
- let Inst{7-0} = Ii{10-3};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <5> Rtt32;
- let Inst{12-8} = Rtt32{4-0};
-}
-class Enc_2296022 : OpcodeHexagon {
- bits <3> Ii;
- let Inst{10-8} = Ii{2-0};
- bits <5> Vs32;
- let Inst{4-0} = Vs32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_9664427 : OpcodeHexagon {
- bits <5> Vuu32;
- let Inst{20-16} = Vuu32{4-0};
- bits <5> Vvv32;
- let Inst{12-8} = Vvv32{4-0};
- bits <3> Qss8;
- let Inst{2-0} = Qss8{2-0};
- bits <5> Vd32;
- let Inst{7-3} = Vd32{4-0};
+ let Inst{20-16} = Vv32{4-0};
+ bits <5> Vdd32;
+ let Inst{4-0} = Vdd32{4-0};
}
-class Enc_877823 : OpcodeHexagon {
- bits <6> II;
- let Inst{11-8} = II{5-2};
- let Inst{6-5} = II{1-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
- bits <5> Re32;
- let Inst{20-16} = Re32{4-0};
+class Enc_284ebb : OpcodeHexagon {
+ bits <2> Ps4;
+ let Inst{17-16} = Ps4{1-0};
+ bits <2> Pt4;
+ let Inst{9-8} = Pt4{1-0};
+ bits <2> Pd4;
+ let Inst{1-0} = Pd4{1-0};
}
-class Enc_1589406 : OpcodeHexagon {
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
- bits <3> Os8;
- let Inst{2-0} = Os8{2-0};
+class Enc_733b27 : OpcodeHexagon {
+ bits <5> Ii;
+ let Inst{8-5} = Ii{4-1};
+ bits <2> Pt4;
+ let Inst{10-9} = Pt4{1-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_6900405 : OpcodeHexagon {
- bits <5> Ii;
- let Inst{6-3} = Ii{4-1};
- bits <3> Nt8;
- let Inst{10-8} = Nt8{2-0};
+class Enc_22c845 : OpcodeHexagon {
+ bits <14> Ii;
+ let Inst{10-0} = Ii{13-3};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_14150875 : OpcodeHexagon {
- bits <11> Ii;
- let Inst{21-20} = Ii{10-9};
- let Inst{7-1} = Ii{8-2};
- bits <4> Rs16;
- let Inst{19-16} = Rs16{3-0};
- bits <5> n1;
- let Inst{28-28} = n1{4-4};
- let Inst{25-22} = n1{3-0};
-}
-class Enc_15707793 : OpcodeHexagon {
+class Enc_9b0bc1 : OpcodeHexagon {
+ bits <2> Pu4;
+ let Inst{6-5} = Pu4{1-0};
+ bits <5> Rt32;
+ let Inst{12-8} = Rt32{4-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Gd32;
- let Inst{4-0} = Gd32{4-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
}
-class Enc_14689096 : OpcodeHexagon {
- bits <2> Ii;
- let Inst{13-13} = Ii{1-1};
- let Inst{6-6} = Ii{0-0};
- bits <6> II;
- let Inst{5-0} = II{5-0};
- bits <5> Ru32;
- let Inst{20-16} = Ru32{4-0};
+class Enc_ea4c54 : OpcodeHexagon {
+ bits <2> Pu4;
+ let Inst{6-5} = Pu4{1-0};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
bits <5> Rt32;
let Inst{12-8} = Rt32{4-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
}
-class Enc_9915754 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{6-3} = Ii{5-2};
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
+class Enc_b72622 : OpcodeHexagon {
+ bits <2> Ii;
+ let Inst{13-13} = Ii{1-1};
+ let Inst{5-5} = Ii{0-0};
+ bits <5> Rss32;
+ let Inst{20-16} = Rss32{4-0};
bits <5> Rt32;
let Inst{12-8} = Rt32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_7470998 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Vv32;
- let Inst{20-16} = Vv32{4-0};
- bits <2> Qx4;
- let Inst{1-0} = Qx4{1-0};
+ bits <5> Rxx32;
+ let Inst{4-0} = Rxx32{4-0};
}
-class Enc_11471622 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
+class Enc_569cfe : OpcodeHexagon {
bits <5> Rt32;
let Inst{20-16} = Rt32{4-0};
- bits <5> Vdd32;
- let Inst{4-0} = Vdd32{4-0};
-}
-class Enc_14363183 : OpcodeHexagon {
- bits <2> Qv4;
- let Inst{23-22} = Qv4{1-0};
- bits <5> Vd32;
- let Inst{4-0} = Vd32{4-0};
+ bits <5> Vx32;
+ let Inst{4-0} = Vx32{4-0};
}
-class Enc_15816255 : OpcodeHexagon {
+class Enc_96ce4f : OpcodeHexagon {
+ bits <4> Ii;
+ let Inst{6-3} = Ii{3-0};
bits <1> Mu2;
let Inst{13-13} = Mu2{0-0};
- bits <5> Rtt32;
- let Inst{12-8} = Rtt32{4-0};
+ bits <3> Nt8;
+ let Inst{10-8} = Nt8{2-0};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_5321335 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{20-16} = Vu32{4-0};
- bits <5> Vv32;
- let Inst{12-8} = Vv32{4-0};
- bits <3> Rt8;
- let Inst{2-0} = Rt8{2-0};
- bits <4> Vdd16;
- let Inst{7-4} = Vdd16{3-0};
-}
-class Enc_12702821 : OpcodeHexagon {
+class Enc_143a3c : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{13-8} = Ii{5-0};
+ bits <6> II;
+ let Inst{23-21} = II{5-3};
+ let Inst{7-5} = II{2-0};
bits <5> Rss32;
let Inst{20-16} = Rss32{4-0};
- bits <5> Rtt32;
- let Inst{12-8} = Rtt32{4-0};
bits <5> Rxx32;
let Inst{4-0} = Rxx32{4-0};
}
-class Enc_449439 : OpcodeHexagon {
- bits <11> Ii;
- let Inst{26-25} = Ii{10-9};
- let Inst{13-5} = Ii{8-0};
+class Enc_57a33e : OpcodeHexagon {
+ bits <9> Ii;
+ let Inst{13-13} = Ii{8-8};
+ let Inst{7-3} = Ii{7-3};
+ bits <2> Pv4;
+ let Inst{1-0} = Pv4{1-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Ryy32;
- let Inst{4-0} = Ryy32{4-0};
+ bits <5> Rtt32;
+ let Inst{12-8} = Rtt32{4-0};
}
-class Enc_2054304 : OpcodeHexagon {
+class Enc_311abd : OpcodeHexagon {
+ bits <5> Ii;
+ let Inst{12-8} = Ii{4-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <6> Sd64;
- let Inst{5-0} = Sd64{5-0};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
}
-class Enc_236434 : OpcodeHexagon {
+class Enc_a1640c : OpcodeHexagon {
bits <6> Ii;
- let Inst{22-21} = Ii{5-4};
- let Inst{13-13} = Ii{3-3};
- let Inst{7-5} = Ii{2-0};
- bits <5> Ru32;
- let Inst{4-0} = Ru32{4-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <5> Rd32;
- let Inst{12-8} = Rd32{4-0};
-}
-class Enc_5598813 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{8-5} = Ii{3-0};
+ let Inst{13-8} = Ii{5-0};
+ bits <5> Rss32;
+ let Inst{20-16} = Rss32{4-0};
bits <5> Rd32;
let Inst{4-0} = Rd32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
}
-class Enc_8409782 : OpcodeHexagon {
- bits <13> Ii;
- let Inst{26-25} = Ii{12-11};
- let Inst{13-13} = Ii{10-10};
- let Inst{7-0} = Ii{9-2};
+class Enc_de0214 : OpcodeHexagon {
+ bits <12> Ii;
+ let Inst{26-25} = Ii{11-10};
+ let Inst{13-5} = Ii{9-1};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <3> Nt8;
- let Inst{10-8} = Nt8{2-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
}
-class Enc_15182416 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{20-16} = Ii{5-1};
- let Inst{8-8} = Ii{0-0};
- bits <2> Pt4;
- let Inst{10-9} = Pt4{1-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
+class Enc_a90628 : OpcodeHexagon {
+ bits <2> Qv4;
+ let Inst{23-22} = Qv4{1-0};
+ bits <5> Vu32;
+ let Inst{12-8} = Vu32{4-0};
+ bits <5> Vx32;
+ let Inst{4-0} = Vx32{4-0};
}
-class Enc_4501395 : OpcodeHexagon {
- bits <7> Ii;
- let Inst{6-3} = Ii{6-3};
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
+class Enc_fda92c : OpcodeHexagon {
+ bits <17> Ii;
+ let Inst{26-25} = Ii{16-15};
+ let Inst{20-16} = Ii{14-10};
+ let Inst{13-13} = Ii{9-9};
+ let Inst{7-0} = Ii{8-1};
+ bits <5> Rt32;
+ let Inst{12-8} = Rt32{4-0};
+}
+class Enc_831a7d : OpcodeHexagon {
+ bits <5> Rss32;
+ let Inst{20-16} = Rss32{4-0};
bits <5> Rtt32;
let Inst{12-8} = Rtt32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_6039436 : OpcodeHexagon {
- bits <3> Qtt8;
- let Inst{2-0} = Qtt8{2-0};
- bits <5> Vuu32;
- let Inst{20-16} = Vuu32{4-0};
- bits <5> Vvv32;
- let Inst{12-8} = Vvv32{4-0};
- bits <5> Vdd32;
- let Inst{7-3} = Vdd32{4-0};
+ bits <5> Rxx32;
+ let Inst{4-0} = Rxx32{4-0};
+ bits <2> Pe4;
+ let Inst{6-5} = Pe4{1-0};
}
-class Enc_476163 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{20-16} = Vu32{4-0};
- bits <3> Rt8;
- let Inst{2-0} = Rt8{2-0};
- bits <5> Vd32;
- let Inst{7-3} = Vd32{4-0};
- bits <5> Vy32;
- let Inst{12-8} = Vy32{4-0};
+class Enc_11a146 : OpcodeHexagon {
+ bits <4> Ii;
+ let Inst{11-8} = Ii{3-0};
+ bits <5> Rss32;
+ let Inst{20-16} = Rss32{4-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
}
-class Enc_11281763 : OpcodeHexagon {
+class Enc_b15941 : OpcodeHexagon {
+ bits <4> Ii;
+ let Inst{6-3} = Ii{3-0};
bits <1> Mu2;
let Inst{13-13} = Mu2{0-0};
- bits <5> Vs32;
- let Inst{4-0} = Vs32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_9929262 : OpcodeHexagon {
- bits <16> Ii;
- let Inst{21-21} = Ii{15-15};
- let Inst{13-8} = Ii{14-9};
- let Inst{2-0} = Ii{8-6};
bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <5> Vs32;
- let Inst{7-3} = Vs32{4-0};
-}
-class Enc_13174858 : OpcodeHexagon {
- bits <16> Ii;
- let Inst{21-21} = Ii{15-15};
- let Inst{13-8} = Ii{14-9};
- let Inst{2-0} = Ii{8-6};
- bits <5> Vs32;
- let Inst{7-3} = Vs32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_8437395 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{13-13} = Ii{3-3};
- let Inst{10-8} = Ii{2-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <5> Vd32;
- let Inst{4-0} = Vd32{4-0};
-}
-class Enc_16578332 : OpcodeHexagon {
- bits <9> Ii;
- let Inst{10-8} = Ii{8-6};
- bits <5> Zdd8;
- let Inst{4-0} = Zdd8{4-0};
+ let Inst{12-8} = Rt32{4-0};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_12829314 : OpcodeHexagon {
+class Enc_b78edd : OpcodeHexagon {
bits <11> Ii;
let Inst{21-20} = Ii{10-9};
let Inst{7-1} = Ii{8-2};
bits <4> Rs16;
let Inst{19-16} = Rs16{3-0};
+ bits <4> n1;
+ let Inst{28-28} = n1{3-3};
+ let Inst{24-23} = n1{2-1};
+ let Inst{8-8} = n1{0-0};
}
-class Enc_9744403 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{13-9} = Vu32{4-0};
- bits <5> Vv32;
- let Inst{8-4} = Vv32{4-0};
- bits <4> Vdd16;
- let Inst{3-0} = Vdd16{3-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_10968391 : OpcodeHexagon {
+class Enc_a27588 : OpcodeHexagon {
bits <11> Ii;
- let Inst{21-20} = Ii{10-9};
- let Inst{7-1} = Ii{8-2};
- bits <4> Rs16;
- let Inst{19-16} = Rs16{3-0};
- bits <7> n1;
- let Inst{28-28} = n1{6-6};
- let Inst{25-22} = n1{5-2};
- let Inst{13-13} = n1{1-1};
- let Inst{8-8} = n1{0-0};
+ let Inst{26-25} = Ii{10-9};
+ let Inst{13-5} = Ii{8-0};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+ bits <5> Ryy32;
+ let Inst{4-0} = Ryy32{4-0};
}
-class Enc_64199 : OpcodeHexagon {
- bits <7> Ii;
- let Inst{8-4} = Ii{6-2};
- bits <4> Rd16;
- let Inst{3-0} = Rd16{3-0};
+class Enc_2a7b91 : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{20-16} = Ii{5-1};
+ let Inst{8-8} = Ii{0-0};
+ bits <2> Pt4;
+ let Inst{10-9} = Pt4{1-0};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
}
-class Enc_11039423 : OpcodeHexagon {
- bits <3> Ii;
- let Inst{10-8} = Ii{2-0};
+class Enc_b43b67 : OpcodeHexagon {
+ bits <5> Vu32;
+ let Inst{12-8} = Vu32{4-0};
+ bits <5> Vv32;
+ let Inst{20-16} = Vv32{4-0};
bits <5> Vd32;
let Inst{4-0} = Vd32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
+ bits <2> Qx4;
+ let Inst{6-5} = Qx4{1-0};
}
-class Enc_6730375 : OpcodeHexagon {
+class Enc_4aca3a : OpcodeHexagon {
bits <11> Ii;
let Inst{21-20} = Ii{10-9};
let Inst{7-1} = Ii{8-2};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
bits <3> Ns8;
let Inst{18-16} = Ns8{2-0};
+ bits <3> n1;
+ let Inst{29-29} = n1{2-2};
+ let Inst{26-25} = n1{1-0};
}
-class Enc_16213761 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Vv32;
- let Inst{23-19} = Vv32{4-0};
- bits <3> Rt8;
- let Inst{18-16} = Rt8{2-0};
- bits <5> Vxx32;
- let Inst{4-0} = Vxx32{4-0};
-}
-class Enc_13204995 : OpcodeHexagon {
+class Enc_b38ffc : OpcodeHexagon {
bits <4> Ii;
let Inst{11-8} = Ii{3-0};
bits <4> Rs16;
@@ -2286,79 +1977,26 @@ class Enc_13204995 : OpcodeHexagon {
bits <4> Rt16;
let Inst{3-0} = Rt16{3-0};
}
-class Enc_13338314 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{13-13} = Ii{3-3};
- let Inst{10-8} = Ii{2-0};
- bits <2> Pv4;
- let Inst{12-11} = Pv4{1-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <5> Vd32;
- let Inst{4-0} = Vd32{4-0};
-}
-class Enc_9920336 : OpcodeHexagon {
- bits <2> Ii;
- let Inst{13-13} = Ii{1-1};
- let Inst{7-7} = Ii{0-0};
- bits <2> Pv4;
- let Inst{6-5} = Pv4{1-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <5> Ru32;
- let Inst{12-8} = Ru32{4-0};
- bits <5> Rtt32;
- let Inst{4-0} = Rtt32{4-0};
-}
-class Enc_15380240 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{20-16} = Vu32{4-0};
- bits <3> Rt8;
- let Inst{2-0} = Rt8{2-0};
- bits <5> Vdd32;
- let Inst{7-3} = Vdd32{4-0};
- bits <5> Vy32;
- let Inst{12-8} = Vy32{4-0};
+class Enc_cda00a : OpcodeHexagon {
+ bits <12> Ii;
+ let Inst{19-16} = Ii{11-8};
+ let Inst{12-5} = Ii{7-0};
+ bits <2> Pu4;
+ let Inst{22-21} = Pu4{1-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
}
-class Enc_3296020 : OpcodeHexagon {
+class Enc_2fbf3c : OpcodeHexagon {
bits <3> Ii;
let Inst{10-8} = Ii{2-0};
- bits <5> Vs32;
- let Inst{4-0} = Vs32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_2428539 : OpcodeHexagon {
- bits <11> Ii;
- let Inst{21-20} = Ii{10-9};
- let Inst{7-1} = Ii{8-2};
bits <4> Rs16;
- let Inst{19-16} = Rs16{3-0};
- bits <4> n1;
- let Inst{28-28} = n1{3-3};
- let Inst{24-23} = n1{2-1};
- let Inst{8-8} = n1{0-0};
-}
-class Enc_10039393 : OpcodeHexagon {
- bits <3> Ii;
- let Inst{10-8} = Ii{2-0};
- bits <5> Vd32;
- let Inst{4-0} = Vd32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_9372046 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{13-13} = Ii{3-3};
- let Inst{10-8} = Ii{2-0};
- bits <2> Pv4;
- let Inst{12-11} = Pv4{1-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <3> Os8;
- let Inst{2-0} = Os8{2-0};
+ let Inst{7-4} = Rs16{3-0};
+ bits <4> Rd16;
+ let Inst{3-0} = Rd16{3-0};
}
-class Enc_2901241 : OpcodeHexagon {
+class Enc_70b24b : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{8-5} = Ii{5-2};
bits <1> Mu2;
let Inst{13-13} = Mu2{0-0};
bits <5> Rdd32;
@@ -2366,424 +2004,294 @@ class Enc_2901241 : OpcodeHexagon {
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_16145290 : OpcodeHexagon {
- bits <2> Ps4;
- let Inst{6-5} = Ps4{1-0};
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Vv32;
- let Inst{20-16} = Vv32{4-0};
- bits <5> Vdd32;
- let Inst{4-0} = Vdd32{4-0};
-}
-class Enc_13783220 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Rtt32;
- let Inst{20-16} = Rtt32{4-0};
- bits <5> Vd32;
- let Inst{4-0} = Vd32{4-0};
-}
-class Enc_12261611 : OpcodeHexagon {
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
- bits <5> Ryy32;
- let Inst{4-0} = Ryy32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_6135183 : OpcodeHexagon {
- bits <4> Rs16;
- let Inst{7-4} = Rs16{3-0};
- bits <4> Rx16;
- let Inst{3-0} = Rx16{3-0};
-}
-class Enc_5523416 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{13-8} = Ii{5-0};
+class Enc_2ae154 : OpcodeHexagon {
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
+ bits <5> Rt32;
+ let Inst{12-8} = Rt32{4-0};
+ bits <5> Rx32;
+ let Inst{4-0} = Rx32{4-0};
}
-class Enc_13472494 : OpcodeHexagon {
- bits <10> Ii;
- let Inst{21-21} = Ii{9-9};
- let Inst{13-5} = Ii{8-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
+class Enc_50b5ac : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{17-16} = Ii{5-4};
+ let Inst{6-3} = Ii{3-0};
+ bits <2> Pv4;
+ let Inst{1-0} = Pv4{1-0};
+ bits <5> Rtt32;
+ let Inst{12-8} = Rtt32{4-0};
}
-class Enc_16303398 : OpcodeHexagon {
+class Enc_2ea740 : OpcodeHexagon {
bits <4> Ii;
- let Inst{8-5} = Ii{3-0};
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_3494181 : OpcodeHexagon {
- bits <3> Ii;
- let Inst{7-5} = Ii{2-0};
+ let Inst{13-13} = Ii{3-3};
+ let Inst{10-8} = Ii{2-0};
+ bits <2> Qv4;
+ let Inst{12-11} = Qv4{1-0};
bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
+ let Inst{20-16} = Rt32{4-0};
+ bits <5> Vs32;
+ let Inst{4-0} = Vs32{4-0};
+}
+class Enc_08d755 : OpcodeHexagon {
+ bits <8> Ii;
+ let Inst{12-5} = Ii{7-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
+ bits <2> Pd4;
+ let Inst{1-0} = Pd4{1-0};
}
-class Enc_13983714 : OpcodeHexagon {
+class Enc_1178da : OpcodeHexagon {
+ bits <3> Ii;
+ let Inst{7-5} = Ii{2-0};
bits <5> Vu32;
let Inst{12-8} = Vu32{4-0};
bits <5> Vv32;
let Inst{20-16} = Vv32{4-0};
- bits <2> Qd4;
- let Inst{1-0} = Qd4{1-0};
+ bits <5> Vxx32;
+ let Inst{4-0} = Vxx32{4-0};
}
-class Enc_931653 : OpcodeHexagon {
- bits <7> Ii;
- let Inst{8-5} = Ii{6-3};
+class Enc_8dbe85 : OpcodeHexagon {
bits <1> Mu2;
let Inst{13-13} = Mu2{0-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
+ bits <3> Nt8;
+ let Inst{10-8} = Nt8{2-0};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_7622936 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{20-16} = Vu32{4-0};
- bits <3> Rt8;
- let Inst{2-0} = Rt8{2-0};
- bits <5> Vxx32;
- let Inst{7-3} = Vxx32{4-0};
- bits <5> Vy32;
- let Inst{12-8} = Vy32{4-0};
+class Enc_5a18b3 : OpcodeHexagon {
+ bits <11> Ii;
+ let Inst{21-20} = Ii{10-9};
+ let Inst{7-1} = Ii{8-2};
+ bits <3> Ns8;
+ let Inst{18-16} = Ns8{2-0};
+ bits <5> n1;
+ let Inst{29-29} = n1{4-4};
+ let Inst{26-25} = n1{3-2};
+ let Inst{22-22} = n1{1-1};
+ let Inst{13-13} = n1{0-0};
}
-class Enc_8773155 : OpcodeHexagon {
- bits <8> Ii;
- let Inst{12-7} = Ii{7-2};
+class Enc_14d27a : OpcodeHexagon {
bits <5> II;
- let Inst{4-0} = II{4-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
-}
-class Enc_5401217 : OpcodeHexagon {
+ let Inst{12-8} = II{4-0};
bits <11> Ii;
let Inst{21-20} = Ii{10-9};
let Inst{7-1} = Ii{8-2};
bits <4> Rs16;
let Inst{19-16} = Rs16{3-0};
- bits <3> n1;
- let Inst{28-28} = n1{2-2};
- let Inst{24-23} = n1{1-0};
}
-class Enc_6736678 : OpcodeHexagon {
- bits <8> Ii;
- let Inst{12-5} = Ii{7-0};
+class Enc_a05677 : OpcodeHexagon {
+ bits <5> Ii;
+ let Inst{12-8} = Ii{4-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <2> Pd4;
- let Inst{1-0} = Pd4{1-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
}
-class Enc_3457570 : OpcodeHexagon {
- bits <3> Ii;
- let Inst{7-5} = Ii{2-0};
+class Enc_f0cca7 : OpcodeHexagon {
+ bits <8> Ii;
+ let Inst{12-5} = Ii{7-0};
+ bits <6> II;
+ let Inst{20-16} = II{5-1};
+ let Inst{13-13} = II{0-0};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
+}
+class Enc_500cb0 : OpcodeHexagon {
bits <5> Vu32;
let Inst{12-8} = Vu32{4-0};
- bits <5> Vv32;
- let Inst{20-16} = Vv32{4-0};
bits <5> Vxx32;
let Inst{4-0} = Vxx32{4-0};
}
-class Enc_3813442 : OpcodeHexagon {
+class Enc_7e5a82 : OpcodeHexagon {
bits <5> Ii;
- let Inst{6-3} = Ii{4-1};
- bits <2> Pv4;
- let Inst{1-0} = Pv4{1-0};
- bits <3> Nt8;
- let Inst{10-8} = Nt8{2-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_3135259 : OpcodeHexagon {
- bits <3> Ii;
- let Inst{10-8} = Ii{2-0};
- bits <4> Rs16;
- let Inst{7-4} = Rs16{3-0};
- bits <4> Rd16;
- let Inst{3-0} = Rd16{3-0};
+ let Inst{12-8} = Ii{4-0};
+ bits <5> Rss32;
+ let Inst{20-16} = Rss32{4-0};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
}
-class Enc_5486172 : OpcodeHexagon {
- bits <2> Ii;
- let Inst{13-13} = Ii{1-1};
- let Inst{7-7} = Ii{0-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <5> Ru32;
- let Inst{12-8} = Ru32{4-0};
- bits <3> Nt8;
- let Inst{2-0} = Nt8{2-0};
+class Enc_12b6e9 : OpcodeHexagon {
+ bits <4> Ii;
+ let Inst{11-8} = Ii{3-0};
+ bits <5> Rss32;
+ let Inst{20-16} = Rss32{4-0};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
}
-class Enc_11081334 : OpcodeHexagon {
- bits <16> Ii;
- let Inst{21-21} = Ii{15-15};
- let Inst{13-8} = Ii{14-9};
- let Inst{2-0} = Ii{8-6};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <5> Vss32;
- let Inst{7-3} = Vss32{4-0};
+class Enc_6f70ca : OpcodeHexagon {
+ bits <8> Ii;
+ let Inst{8-4} = Ii{7-3};
}
-class Enc_9470751 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{13-13} = Ii{3-3};
- let Inst{10-8} = Ii{2-0};
- bits <2> Pv4;
- let Inst{12-11} = Pv4{1-0};
+class Enc_7222b7 : OpcodeHexagon {
bits <5> Rt32;
let Inst{20-16} = Rt32{4-0};
- bits <5> Vs32;
- let Inst{4-0} = Vs32{4-0};
+ bits <2> Qd4;
+ let Inst{1-0} = Qd4{1-0};
}
-class Enc_2683366 : OpcodeHexagon {
- bits <3> Quu8;
- let Inst{10-8} = Quu8{2-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <3> Qdd8;
- let Inst{5-3} = Qdd8{2-0};
+class Enc_e3b0c4 : OpcodeHexagon {
}
-class Enc_15830826 : OpcodeHexagon {
- bits <14> Ii;
- let Inst{10-0} = Ii{13-3};
+class Enc_a255dc : OpcodeHexagon {
+ bits <3> Ii;
+ let Inst{10-8} = Ii{2-0};
+ bits <5> Vd32;
+ let Inst{4-0} = Vd32{4-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
}
-class Enc_4967902 : OpcodeHexagon {
- bits <7> Ii;
- let Inst{12-7} = Ii{6-1};
- bits <6> II;
- let Inst{13-13} = II{5-5};
- let Inst{4-0} = II{4-0};
- bits <2> Pv4;
- let Inst{6-5} = Pv4{1-0};
+class Enc_cb4b4e : OpcodeHexagon {
+ bits <2> Pu4;
+ let Inst{6-5} = Pu4{1-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
-}
-class Enc_14287645 : OpcodeHexagon {
- bits <5> Rss32;
- let Inst{20-16} = Rss32{4-0};
bits <5> Rt32;
let Inst{12-8} = Rt32{4-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
-}
-class Enc_8324216 : OpcodeHexagon {
- bits <2> Ps4;
- let Inst{17-16} = Ps4{1-0};
- bits <2> Pt4;
- let Inst{9-8} = Pt4{1-0};
- bits <2> Pd4;
- let Inst{1-0} = Pd4{1-0};
-}
-class Enc_913538 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <3> Qd8;
- let Inst{5-3} = Qd8{2-0};
-}
-class Enc_16311032 : OpcodeHexagon {
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <5> Rtt32;
- let Inst{12-8} = Rtt32{4-0};
- bits <5> Rx32;
- let Inst{4-0} = Rx32{4-0};
-}
-class Enc_9864697 : OpcodeHexagon {
- bits <8> Ii;
- let Inst{12-5} = Ii{7-0};
- bits <6> II;
- let Inst{20-16} = II{5-1};
- let Inst{13-13} = II{0-0};
bits <5> Rdd32;
let Inst{4-0} = Rdd32{4-0};
}
-class Enc_11205051 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{11-8} = Ii{5-2};
- bits <4> Rs16;
- let Inst{7-4} = Rs16{3-0};
- bits <4> Rt16;
- let Inst{3-0} = Rt16{3-0};
-}
-class Enc_5611087 : OpcodeHexagon {
- bits <7> Ii;
- let Inst{8-5} = Ii{6-3};
- bits <2> Pt4;
- let Inst{10-9} = Pt4{1-0};
+class Enc_9cdba7 : OpcodeHexagon {
+ bits <8> Ii;
+ let Inst{12-5} = Ii{7-0};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
bits <5> Rdd32;
let Inst{4-0} = Rdd32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
}
-class Enc_10915758 : OpcodeHexagon {
- bits <5> Ii;
- let Inst{6-3} = Ii{4-1};
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_8943121 : OpcodeHexagon {
+class Enc_5cd7e9 : OpcodeHexagon {
+ bits <12> Ii;
+ let Inst{26-25} = Ii{11-10};
+ let Inst{13-5} = Ii{9-1};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Rtt32;
- let Inst{12-8} = Rtt32{4-0};
+ bits <5> Ryy32;
+ let Inst{4-0} = Ryy32{4-0};
}
-class Enc_1539665 : OpcodeHexagon {
- bits <5> Cs32;
- let Inst{20-16} = Cs32{4-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
+class Enc_454a26 : OpcodeHexagon {
+ bits <2> Pt4;
+ let Inst{9-8} = Pt4{1-0};
+ bits <2> Ps4;
+ let Inst{17-16} = Ps4{1-0};
+ bits <2> Pd4;
+ let Inst{1-0} = Pd4{1-0};
}
-class Enc_8479583 : OpcodeHexagon {
+class Enc_a6853f : OpcodeHexagon {
bits <11> Ii;
let Inst{21-20} = Ii{10-9};
let Inst{7-1} = Ii{8-2};
bits <3> Ns8;
let Inst{18-16} = Ns8{2-0};
- bits <5> n1;
- let Inst{29-29} = n1{4-4};
- let Inst{26-25} = n1{3-2};
- let Inst{23-23} = n1{1-1};
+ bits <6> n1;
+ let Inst{29-29} = n1{5-5};
+ let Inst{26-25} = n1{4-3};
+ let Inst{23-22} = n1{2-1};
let Inst{13-13} = n1{0-0};
}
-class Enc_313333 : OpcodeHexagon {
+class Enc_c175d0 : OpcodeHexagon {
+ bits <4> Ii;
+ let Inst{11-8} = Ii{3-0};
+ bits <4> Rs16;
+ let Inst{7-4} = Rs16{3-0};
+ bits <4> Rd16;
+ let Inst{3-0} = Rd16{3-0};
+}
+class Enc_895bd9 : OpcodeHexagon {
+ bits <2> Qu4;
+ let Inst{9-8} = Qu4{1-0};
bits <5> Rt32;
let Inst{20-16} = Rt32{4-0};
bits <5> Vx32;
let Inst{4-0} = Vx32{4-0};
}
-class Enc_11544269 : OpcodeHexagon {
- bits <11> Ii;
- let Inst{21-20} = Ii{10-9};
- let Inst{7-1} = Ii{8-2};
- bits <3> Ns8;
- let Inst{18-16} = Ns8{2-0};
- bits <4> n1;
- let Inst{29-29} = n1{3-3};
- let Inst{26-25} = n1{2-1};
- let Inst{13-13} = n1{0-0};
+class Enc_ea23e4 : OpcodeHexagon {
+ bits <5> Rtt32;
+ let Inst{12-8} = Rtt32{4-0};
+ bits <5> Rss32;
+ let Inst{20-16} = Rss32{4-0};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
}
-class Enc_9018141 : OpcodeHexagon {
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <5> Cd32;
- let Inst{4-0} = Cd32{4-0};
+class Enc_4dc228 : OpcodeHexagon {
+ bits <9> Ii;
+ let Inst{12-8} = Ii{8-4};
+ let Inst{4-3} = Ii{3-2};
+ bits <10> II;
+ let Inst{20-16} = II{9-5};
+ let Inst{7-5} = II{4-2};
+ let Inst{1-0} = II{1-0};
+}
+class Enc_10bc21 : OpcodeHexagon {
+ bits <4> Ii;
+ let Inst{6-3} = Ii{3-0};
+ bits <5> Rt32;
+ let Inst{12-8} = Rt32{4-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
+}
+class Enc_1aaec1 : OpcodeHexagon {
+ bits <3> Ii;
+ let Inst{10-8} = Ii{2-0};
+ bits <3> Os8;
+ let Inst{2-0} = Os8{2-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
}
-class Enc_6152036 : OpcodeHexagon {
+class Enc_329361 : OpcodeHexagon {
+ bits <2> Pu4;
+ let Inst{6-5} = Pu4{1-0};
bits <5> Rss32;
let Inst{20-16} = Rss32{4-0};
- bits <5> Gdd32;
- let Inst{4-0} = Gdd32{4-0};
-}
-class Enc_1954437 : OpcodeHexagon {
- bits <6> Sss64;
- let Inst{21-16} = Sss64{5-0};
+ bits <5> Rtt32;
+ let Inst{12-8} = Rtt32{4-0};
bits <5> Rdd32;
let Inst{4-0} = Rdd32{4-0};
}
-class Enc_3742184 : OpcodeHexagon {
+class Enc_d2c7f1 : OpcodeHexagon {
+ bits <5> Rtt32;
+ let Inst{12-8} = Rtt32{4-0};
bits <5> Rss32;
let Inst{20-16} = Rss32{4-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
+ bits <2> Pe4;
+ let Inst{6-5} = Pe4{1-0};
}
-class Enc_1835415 : OpcodeHexagon {
+class Enc_3680c2 : OpcodeHexagon {
bits <7> Ii;
- let Inst{10-5} = Ii{6-1};
- bits <2> Pt4;
- let Inst{12-11} = Pt4{1-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
+ let Inst{11-5} = Ii{6-0};
+ bits <5> Rss32;
+ let Inst{20-16} = Rss32{4-0};
+ bits <2> Pd4;
+ let Inst{1-0} = Pd4{1-0};
}
-class Enc_1085466 : OpcodeHexagon {
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <5> Vdd32;
- let Inst{7-3} = Vdd32{4-0};
+class Enc_1ef990 : OpcodeHexagon {
+ bits <2> Pv4;
+ let Inst{12-11} = Pv4{1-0};
+ bits <1> Mu2;
+ let Inst{13-13} = Mu2{0-0};
+ bits <5> Vs32;
+ let Inst{4-0} = Vs32{4-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
}
-class Enc_13150110 : OpcodeHexagon {
- bits <11> Ii;
- let Inst{26-25} = Ii{10-9};
- let Inst{13-13} = Ii{8-8};
- let Inst{7-0} = Ii{7-0};
+class Enc_e957fb : OpcodeHexagon {
+ bits <12> Ii;
+ let Inst{26-25} = Ii{11-10};
+ let Inst{13-13} = Ii{9-9};
+ let Inst{7-0} = Ii{8-1};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
bits <5> Rt32;
let Inst{12-8} = Rt32{4-0};
}
-class Enc_6772177 : OpcodeHexagon {
- bits <5> Zu8;
- let Inst{12-8} = Zu8{4-0};
- bits <5> Zd8;
- let Inst{4-0} = Zd8{4-0};
-}
-class Enc_6616512 : OpcodeHexagon {
- bits <16> Ii;
- let Inst{21-21} = Ii{15-15};
- let Inst{13-8} = Ii{14-9};
- let Inst{2-0} = Ii{8-6};
+class Enc_c9e3bc : OpcodeHexagon {
+ bits <4> Ii;
+ let Inst{13-13} = Ii{3-3};
+ let Inst{10-8} = Ii{2-0};
bits <5> Rt32;
let Inst{20-16} = Rt32{4-0};
- bits <5> Vdd32;
- let Inst{7-3} = Vdd32{4-0};
-}
-class Enc_1886960 : OpcodeHexagon {
- bits <16> Ii;
- let Inst{26-25} = Ii{15-14};
- let Inst{20-16} = Ii{13-9};
- let Inst{13-5} = Ii{8-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
-}
-class Enc_2835415 : OpcodeHexagon {
- bits <8> Ii;
- let Inst{10-5} = Ii{7-2};
- bits <2> Pt4;
- let Inst{12-11} = Pt4{1-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
-}
-class Enc_14024197 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Rtt32;
- let Inst{20-16} = Rtt32{4-0};
- bits <5> Vxx32;
- let Inst{4-0} = Vxx32{4-0};
-}
-class Enc_12297800 : OpcodeHexagon {
- bits <18> Ii;
- let Inst{26-25} = Ii{17-16};
- let Inst{20-16} = Ii{15-11};
- let Inst{13-13} = Ii{10-10};
- let Inst{7-0} = Ii{9-2};
- bits <3> Nt8;
- let Inst{10-8} = Nt8{2-0};
+ bits <5> Vs32;
+ let Inst{4-0} = Vs32{4-0};
}
-class Enc_7254313 : OpcodeHexagon {
+class Enc_2e1979 : OpcodeHexagon {
bits <2> Ii;
let Inst{13-13} = Ii{1-1};
let Inst{7-7} = Ii{0-0};
@@ -2793,20 +2301,12 @@ class Enc_7254313 : OpcodeHexagon {
let Inst{20-16} = Rs32{4-0};
bits <5> Rt32;
let Inst{12-8} = Rt32{4-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
-}
-class Enc_677558 : OpcodeHexagon {
- bits <9> Ii;
- let Inst{10-5} = Ii{8-3};
- bits <2> Pt4;
- let Inst{12-11} = Pt4{1-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
}
-class Enc_6223403 : OpcodeHexagon {
+class Enc_0b2e5b : OpcodeHexagon {
+ bits <3> Ii;
+ let Inst{7-5} = Ii{2-0};
bits <5> Vu32;
let Inst{12-8} = Vu32{4-0};
bits <5> Vv32;
@@ -2814,220 +2314,178 @@ class Enc_6223403 : OpcodeHexagon {
bits <5> Vd32;
let Inst{4-0} = Vd32{4-0};
}
-class Enc_674613 : OpcodeHexagon {
+class Enc_d483b9 : OpcodeHexagon {
+ bits <1> Ii;
+ let Inst{5-5} = Ii{0-0};
bits <5> Vuu32;
- let Inst{20-16} = Vuu32{4-0};
- bits <5> Vdd32;
- let Inst{7-3} = Vdd32{4-0};
-}
-class Enc_16479122 : OpcodeHexagon {
- bits <8> Ii;
- let Inst{7-3} = Ii{7-3};
- bits <3> Rdd8;
- let Inst{2-0} = Rdd8{2-0};
-}
-class Enc_11704059 : OpcodeHexagon {
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
+ let Inst{12-8} = Vuu32{4-0};
+ bits <5> Rt32;
+ let Inst{20-16} = Rt32{4-0};
+ bits <5> Vxx32;
+ let Inst{4-0} = Vxx32{4-0};
}
-class Enc_9165078 : OpcodeHexagon {
- bits <9> Ii;
- let Inst{8-3} = Ii{8-3};
- bits <3> Rtt8;
- let Inst{2-0} = Rtt8{2-0};
+class Enc_51635c : OpcodeHexagon {
+ bits <7> Ii;
+ let Inst{8-4} = Ii{6-2};
+ bits <4> Rd16;
+ let Inst{3-0} = Rd16{3-0};
}
-class Enc_15376009 : OpcodeHexagon {
+class Enc_e26546 : OpcodeHexagon {
bits <5> Ii;
- let Inst{8-5} = Ii{4-1};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
+ let Inst{6-3} = Ii{4-1};
+ bits <3> Nt8;
+ let Inst{10-8} = Nt8{2-0};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_8838398 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{21-21} = Ii{3-3};
- let Inst{7-5} = Ii{2-0};
- bits <6> II;
- let Inst{13-8} = II{5-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <5> Rx32;
- let Inst{4-0} = Rx32{4-0};
-}
-class Enc_2328527 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Vv32;
- let Inst{20-16} = Vv32{4-0};
- bits <5> Vx32;
- let Inst{4-0} = Vx32{4-0};
-}
-class Enc_1451363 : OpcodeHexagon {
- bits <4> Rd16;
- let Inst{3-0} = Rd16{3-0};
-}
-class Enc_4030179 : OpcodeHexagon {
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
+class Enc_70fb07 : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{13-8} = Ii{5-0};
+ bits <5> Rss32;
+ let Inst{20-16} = Rss32{4-0};
+ bits <5> Rxx32;
+ let Inst{4-0} = Rxx32{4-0};
}
-class Enc_13770697 : OpcodeHexagon {
+class Enc_277737 : OpcodeHexagon {
+ bits <8> Ii;
+ let Inst{22-21} = Ii{7-6};
+ let Inst{13-13} = Ii{5-5};
+ let Inst{7-5} = Ii{4-2};
bits <5> Ru32;
let Inst{4-0} = Ru32{4-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Ry32;
- let Inst{12-8} = Ry32{4-0};
-}
-class Enc_12212978 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{8-5} = Ii{3-0};
- bits <2> Pt4;
- let Inst{10-9} = Pt4{1-0};
bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
+ let Inst{12-8} = Rd32{4-0};
+}
+class Enc_5c124a : OpcodeHexagon {
+ bits <19> Ii;
+ let Inst{26-25} = Ii{18-17};
+ let Inst{20-16} = Ii{16-12};
+ let Inst{13-13} = Ii{11-11};
+ let Inst{7-0} = Ii{10-3};
+ bits <5> Rtt32;
+ let Inst{12-8} = Rtt32{4-0};
}
-class Enc_12665927 : OpcodeHexagon {
+class Enc_928ca1 : OpcodeHexagon {
bits <1> Mu2;
let Inst{13-13} = Mu2{0-0};
- bits <5> Vdd32;
- let Inst{7-3} = Vdd32{4-0};
+ bits <5> Rtt32;
+ let Inst{12-8} = Rtt32{4-0};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_2082956 : OpcodeHexagon {
- bits <32> Ii;
- let Inst{27-16} = Ii{31-20};
- let Inst{13-0} = Ii{19-6};
+class Enc_da664b : OpcodeHexagon {
+ bits <2> Ii;
+ let Inst{13-13} = Ii{1-1};
+ let Inst{7-7} = Ii{0-0};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+ bits <5> Rt32;
+ let Inst{12-8} = Rt32{4-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
+}
+class Enc_7b7ba8 : OpcodeHexagon {
+ bits <2> Qu4;
+ let Inst{9-8} = Qu4{1-0};
+ bits <5> Rt32;
+ let Inst{20-16} = Rt32{4-0};
+ bits <5> Vd32;
+ let Inst{4-0} = Vd32{4-0};
+}
+class Enc_47ee5e : OpcodeHexagon {
+ bits <2> Ii;
+ let Inst{13-13} = Ii{1-1};
+ let Inst{7-7} = Ii{0-0};
+ bits <2> Pv4;
+ let Inst{6-5} = Pv4{1-0};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+ bits <5> Ru32;
+ let Inst{12-8} = Ru32{4-0};
+ bits <3> Nt8;
+ let Inst{2-0} = Nt8{2-0};
}
-class Enc_220949 : OpcodeHexagon {
+class Enc_8bcba4 : OpcodeHexagon {
+ bits <6> II;
+ let Inst{5-0} = II{5-0};
+ bits <5> Rt32;
+ let Inst{12-8} = Rt32{4-0};
+ bits <5> Re32;
+ let Inst{20-16} = Re32{4-0};
+}
+class Enc_3a2484 : OpcodeHexagon {
bits <11> Ii;
let Inst{21-20} = Ii{10-9};
let Inst{7-1} = Ii{8-2};
bits <4> Rs16;
let Inst{19-16} = Rs16{3-0};
- bits <5> n1;
- let Inst{28-28} = n1{4-4};
- let Inst{25-23} = n1{3-1};
+ bits <4> n1;
+ let Inst{28-28} = n1{3-3};
+ let Inst{24-23} = n1{2-1};
let Inst{13-13} = n1{0-0};
}
-class Enc_9939385 : OpcodeHexagon {
- bits <9> Ii;
- let Inst{12-8} = Ii{8-4};
- let Inst{4-3} = Ii{3-2};
- bits <10> II;
- let Inst{20-16} = II{9-5};
- let Inst{7-5} = II{4-2};
- let Inst{1-0} = II{1-0};
-}
-class Enc_2117024 : OpcodeHexagon {
- bits <8> Ii;
- let Inst{12-8} = Ii{7-3};
- let Inst{4-2} = Ii{2-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_8390029 : OpcodeHexagon {
- bits <5> Vuu32;
- let Inst{20-16} = Vuu32{4-0};
- bits <5> Vv32;
- let Inst{12-8} = Vv32{4-0};
+class Enc_a5ed8a : OpcodeHexagon {
+ bits <5> Rt32;
+ let Inst{20-16} = Rt32{4-0};
bits <5> Vd32;
- let Inst{7-3} = Vd32{4-0};
+ let Inst{4-0} = Vd32{4-0};
}
-class Enc_10989558 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{20-16} = Vu32{4-0};
- bits <5> Vd32;
- let Inst{7-3} = Vd32{4-0};
+class Enc_cb9321 : OpcodeHexagon {
+ bits <16> Ii;
+ let Inst{27-21} = Ii{15-9};
+ let Inst{13-5} = Ii{8-0};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
+}
+class Enc_668704 : OpcodeHexagon {
+ bits <11> Ii;
+ let Inst{21-20} = Ii{10-9};
+ let Inst{7-1} = Ii{8-2};
+ bits <4> Rs16;
+ let Inst{19-16} = Rs16{3-0};
+ bits <5> n1;
+ let Inst{28-28} = n1{4-4};
+ let Inst{25-22} = n1{3-0};
}
-class Enc_5972412 : OpcodeHexagon {
+class Enc_a7341a : OpcodeHexagon {
bits <5> Vu32;
let Inst{12-8} = Vu32{4-0};
bits <5> Vv32;
let Inst{20-16} = Vv32{4-0};
- bits <5> Vxx32;
- let Inst{4-0} = Vxx32{4-0};
-}
-class Enc_12851489 : OpcodeHexagon {
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
- bits <5> Vss32;
- let Inst{7-3} = Vss32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
+ bits <5> Vx32;
+ let Inst{4-0} = Vx32{4-0};
}
-class Enc_9554661 : OpcodeHexagon {
+class Enc_5eac98 : OpcodeHexagon {
bits <6> Ii;
- let Inst{12-7} = Ii{5-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
-}
-class Enc_4202401 : OpcodeHexagon {
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
- bits <5> Vd32;
- let Inst{7-3} = Vd32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_6091631 : OpcodeHexagon {
- bits <2> Qs4;
- let Inst{9-8} = Qs4{1-0};
- bits <2> Qt4;
- let Inst{23-22} = Qt4{1-0};
- bits <2> Qd4;
- let Inst{1-0} = Qd4{1-0};
+ let Inst{13-8} = Ii{5-0};
+ bits <5> Rss32;
+ let Inst{20-16} = Rss32{4-0};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
}
-class Enc_10157519 : OpcodeHexagon {
+class Enc_02553a : OpcodeHexagon {
+ bits <7> Ii;
+ let Inst{11-5} = Ii{6-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
bits <2> Pd4;
let Inst{1-0} = Pd4{1-0};
}
-class Enc_4835423 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{10-5} = Ii{5-0};
+class Enc_acd6ed : OpcodeHexagon {
+ bits <9> Ii;
+ let Inst{10-5} = Ii{8-3};
bits <2> Pt4;
let Inst{12-11} = Pt4{1-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
-}
-class Enc_14046916 : OpcodeHexagon {
- bits <2> Ii;
- let Inst{13-13} = Ii{1-1};
- let Inst{7-7} = Ii{0-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <5> Ru32;
- let Inst{12-8} = Ru32{4-0};
- bits <5> Rt32;
- let Inst{4-0} = Rt32{4-0};
-}
-class Enc_2921694 : OpcodeHexagon {
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <5> Rtt32;
- let Inst{12-8} = Rtt32{4-0};
- bits <2> Pd4;
- let Inst{1-0} = Pd4{1-0};
-}
-class Enc_8732960 : OpcodeHexagon {
- bits <8> Ii;
- let Inst{12-8} = Ii{7-3};
- let Inst{4-2} = Ii{2-0};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
}
-class Enc_5338033 : OpcodeHexagon {
+class Enc_8e583a : OpcodeHexagon {
bits <11> Ii;
let Inst{21-20} = Ii{10-9};
let Inst{7-1} = Ii{8-2};
@@ -3035,355 +2493,212 @@ class Enc_5338033 : OpcodeHexagon {
let Inst{19-16} = Rs16{3-0};
bits <5> n1;
let Inst{28-28} = n1{4-4};
- let Inst{24-22} = n1{3-1};
+ let Inst{25-23} = n1{3-1};
let Inst{13-13} = n1{0-0};
}
-class Enc_6956613 : OpcodeHexagon {
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
+class Enc_b886fd : OpcodeHexagon {
+ bits <5> Ii;
+ let Inst{6-3} = Ii{4-1};
+ bits <2> Pv4;
+ let Inst{1-0} = Pv4{1-0};
+ bits <5> Rt32;
+ let Inst{12-8} = Rt32{4-0};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_2153798 : OpcodeHexagon {
+class Enc_24a7dc : OpcodeHexagon {
bits <5> Vu32;
let Inst{12-8} = Vu32{4-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <5> Vxx32;
- let Inst{4-0} = Vxx32{4-0};
-}
-class Enc_16210172 : OpcodeHexagon {
- bits <3> Qt8;
- let Inst{10-8} = Qt8{2-0};
- bits <3> Qd8;
- let Inst{5-3} = Qd8{2-0};
-}
-class Enc_5023792 : OpcodeHexagon {
- bits <5> Vuu32;
- let Inst{12-8} = Vuu32{4-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
+ bits <5> Vv32;
+ let Inst{23-19} = Vv32{4-0};
+ bits <3> Rt8;
+ let Inst{18-16} = Rt8{2-0};
bits <5> Vdd32;
let Inst{4-0} = Vdd32{4-0};
}
-class Enc_1244745 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{13-13} = Ii{3-3};
- let Inst{10-8} = Ii{2-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <5> Vd32;
- let Inst{4-0} = Vd32{4-0};
-}
-class Enc_10002182 : OpcodeHexagon {
- bits <11> Ii;
- let Inst{26-25} = Ii{10-9};
- let Inst{13-13} = Ii{8-8};
- let Inst{7-0} = Ii{7-0};
+class Enc_2d829e : OpcodeHexagon {
+ bits <14> Ii;
+ let Inst{10-0} = Ii{13-3};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <3> Nt8;
- let Inst{10-8} = Nt8{2-0};
}
-class Enc_12492533 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{6-3} = Ii{3-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
+class Enc_4f4ed7 : OpcodeHexagon {
+ bits <18> Ii;
+ let Inst{26-25} = Ii{17-16};
+ let Inst{20-16} = Ii{15-11};
+ let Inst{13-5} = Ii{10-2};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
}
-class Enc_1774350 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{17-16} = Ii{5-4};
- let Inst{6-3} = Ii{3-0};
+class Enc_84b2cd : OpcodeHexagon {
+ bits <8> Ii;
+ let Inst{12-7} = Ii{7-2};
+ bits <5> II;
+ let Inst{4-0} = II{4-0};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+}
+class Enc_8dbdfe : OpcodeHexagon {
+ bits <8> Ii;
+ let Inst{13-13} = Ii{7-7};
+ let Inst{7-3} = Ii{6-2};
bits <2> Pv4;
let Inst{1-0} = Pv4{1-0};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
bits <3> Nt8;
let Inst{10-8} = Nt8{2-0};
}
-class Enc_2703240 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{13-13} = Ii{3-3};
- let Inst{10-8} = Ii{2-0};
- bits <2> Qv4;
- let Inst{12-11} = Qv4{1-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <5> Vs32;
- let Inst{4-0} = Vs32{4-0};
+class Enc_90cd8b : OpcodeHexagon {
+ bits <5> Rss32;
+ let Inst{20-16} = Rss32{4-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
}
-class Enc_6975103 : OpcodeHexagon {
- bits <2> Ps4;
- let Inst{17-16} = Ps4{1-0};
+class Enc_bd0b33 : OpcodeHexagon {
+ bits <10> Ii;
+ let Inst{21-21} = Ii{9-9};
+ let Inst{13-5} = Ii{8-0};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
bits <2> Pd4;
let Inst{1-0} = Pd4{1-0};
}
-class Enc_9789480 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{20-16} = Vu32{4-0};
- bits <5> Vv32;
- let Inst{12-8} = Vv32{4-0};
- bits <5> Vdd32;
- let Inst{7-3} = Vdd32{4-0};
-}
-class Enc_12244921 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{10-8} = Ii{2-0};
- bits <3> Os8;
- let Inst{2-0} = Os8{2-0};
+class Enc_c7cd90 : OpcodeHexagon {
+ bits <4> Ii;
+ let Inst{6-3} = Ii{3-0};
+ bits <3> Nt8;
+ let Inst{10-8} = Nt8{2-0};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_8674673 : OpcodeHexagon {
+class Enc_405228 : OpcodeHexagon {
bits <11> Ii;
let Inst{21-20} = Ii{10-9};
let Inst{7-1} = Ii{8-2};
- bits <3> Ns8;
- let Inst{18-16} = Ns8{2-0};
- bits <5> n1;
- let Inst{29-29} = n1{4-4};
- let Inst{26-25} = n1{3-2};
- let Inst{23-22} = n1{1-0};
-}
-class Enc_8514936 : OpcodeHexagon {
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_13455308 : OpcodeHexagon {
- bits <8> Ii;
- let Inst{12-5} = Ii{7-0};
- bits <5> Rss32;
- let Inst{20-16} = Rss32{4-0};
- bits <2> Pd4;
- let Inst{1-0} = Pd4{1-0};
+ bits <4> Rs16;
+ let Inst{19-16} = Rs16{3-0};
+ bits <3> n1;
+ let Inst{28-28} = n1{2-2};
+ let Inst{24-23} = n1{1-0};
}
-class Enc_10188026 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{13-8} = Ii{5-0};
- bits <5> Rss32;
- let Inst{20-16} = Rss32{4-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
+class Enc_81ac1d : OpcodeHexagon {
+ bits <24> Ii;
+ let Inst{24-16} = Ii{23-15};
+ let Inst{13-1} = Ii{14-2};
}
-class Enc_3158657 : OpcodeHexagon {
- bits <2> Pv4;
- let Inst{12-11} = Pv4{1-0};
+class Enc_395cc4 : OpcodeHexagon {
+ bits <7> Ii;
+ let Inst{6-3} = Ii{6-3};
bits <1> Mu2;
let Inst{13-13} = Mu2{0-0};
- bits <5> Vd32;
- let Inst{4-0} = Vd32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_10597934 : OpcodeHexagon {
- bits <4> Rs16;
- let Inst{7-4} = Rs16{3-0};
- bits <4> Rd16;
- let Inst{3-0} = Rd16{3-0};
- bits <2> n1;
- let Inst{9-8} = n1{1-0};
-}
-class Enc_10612292 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <2> Qx4;
- let Inst{1-0} = Qx4{1-0};
-}
-class Enc_5178985 : OpcodeHexagon {
- bits <5> Rss32;
- let Inst{20-16} = Rss32{4-0};
bits <5> Rtt32;
let Inst{12-8} = Rtt32{4-0};
- bits <2> Pu4;
- let Inst{6-5} = Pu4{1-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
-}
-class Enc_3967902 : OpcodeHexagon {
- bits <8> Ii;
- let Inst{12-7} = Ii{7-2};
- bits <6> II;
- let Inst{13-13} = II{5-5};
- let Inst{4-0} = II{4-0};
- bits <2> Pv4;
- let Inst{6-5} = Pv4{1-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
}
-class Enc_2462143 : OpcodeHexagon {
+class Enc_a51a9a : OpcodeHexagon {
bits <8> Ii;
- let Inst{12-5} = Ii{7-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
+ let Inst{12-8} = Ii{7-3};
+ let Inst{4-2} = Ii{2-0};
}
-class Enc_9849208 : OpcodeHexagon {
- bits <8> Ii;
- let Inst{12-7} = Ii{7-2};
+class Enc_d44e31 : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{12-7} = Ii{5-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
bits <5> Rt32;
let Inst{4-0} = Rt32{4-0};
}
-class Enc_12618352 : OpcodeHexagon {
- bits <5> Rtt32;
- let Inst{20-16} = Rtt32{4-0};
- bits <5> Vx32;
- let Inst{7-3} = Vx32{4-0};
-}
-class Enc_7303598 : OpcodeHexagon {
- bits <2> Ii;
- let Inst{13-13} = Ii{1-1};
- let Inst{7-7} = Ii{0-0};
- bits <6> II;
- let Inst{11-8} = II{5-2};
- let Inst{6-5} = II{1-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <5> Ryy32;
- let Inst{4-0} = Ryy32{4-0};
-}
-class Enc_13823098 : OpcodeHexagon {
- bits <5> Gss32;
- let Inst{20-16} = Gss32{4-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
-}
-class Enc_16388420 : OpcodeHexagon {
- bits <2> Qs4;
- let Inst{6-5} = Qs4{1-0};
+class Enc_f77fbc : OpcodeHexagon {
+ bits <4> Ii;
+ let Inst{13-13} = Ii{3-3};
+ let Inst{10-8} = Ii{2-0};
bits <5> Rt32;
let Inst{20-16} = Rt32{4-0};
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
- bits <5> Vvv32;
- let Inst{12-8} = Vvv32{4-0};
- bits <5> Vw32;
- let Inst{4-0} = Vw32{4-0};
-}
-class Enc_8328140 : OpcodeHexagon {
- bits <16> Ii;
- let Inst{21-21} = Ii{15-15};
- let Inst{13-8} = Ii{14-9};
- let Inst{2-0} = Ii{8-6};
- bits <5> Vdd32;
- let Inst{7-3} = Vdd32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
+ bits <3> Os8;
+ let Inst{2-0} = Os8{2-0};
}
-class Enc_1793896 : OpcodeHexagon {
- bits <2> Ii;
- let Inst{13-13} = Ii{1-1};
- let Inst{7-7} = Ii{0-0};
- bits <2> Pv4;
- let Inst{6-5} = Pv4{1-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
+class Enc_d2216a : OpcodeHexagon {
+ bits <5> Rss32;
+ let Inst{20-16} = Rss32{4-0};
+ bits <5> Rtt32;
+ let Inst{12-8} = Rtt32{4-0};
bits <5> Rd32;
let Inst{4-0} = Rd32{4-0};
}
-class Enc_4944558 : OpcodeHexagon {
- bits <2> Qu4;
- let Inst{9-8} = Qu4{1-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <5> Vx32;
- let Inst{4-0} = Vx32{4-0};
+class Enc_85bf58 : OpcodeHexagon {
+ bits <7> Ii;
+ let Inst{6-3} = Ii{6-3};
+ bits <5> Rtt32;
+ let Inst{12-8} = Rtt32{4-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
}
-class Enc_13211717 : OpcodeHexagon {
- bits <5> Vuu32;
- let Inst{12-8} = Vuu32{4-0};
- bits <5> Vvv32;
- let Inst{20-16} = Vvv32{4-0};
+class Enc_71bb9b : OpcodeHexagon {
+ bits <5> Vu32;
+ let Inst{12-8} = Vu32{4-0};
+ bits <5> Vv32;
+ let Inst{20-16} = Vv32{4-0};
bits <5> Vdd32;
let Inst{4-0} = Vdd32{4-0};
}
-class Enc_8170340 : OpcodeHexagon {
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <5> Vx32;
- let Inst{7-3} = Vx32{4-0};
- bits <3> Qdd8;
- let Inst{2-0} = Qdd8{2-0};
+class Enc_52a5dd : OpcodeHexagon {
+ bits <4> Ii;
+ let Inst{6-3} = Ii{3-0};
+ bits <2> Pv4;
+ let Inst{1-0} = Pv4{1-0};
+ bits <3> Nt8;
+ let Inst{10-8} = Nt8{2-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
}
-class Enc_14071773 : OpcodeHexagon {
+class Enc_5e2823 : OpcodeHexagon {
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
bits <5> Rd32;
let Inst{4-0} = Rd32{4-0};
}
-class Enc_8605375 : OpcodeHexagon {
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
+class Enc_28a2dc : OpcodeHexagon {
+ bits <5> Ii;
+ let Inst{12-8} = Ii{4-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
-}
-class Enc_12711252 : OpcodeHexagon {
- bits <2> Pv4;
- let Inst{9-8} = Pv4{1-0};
+ bits <5> Rx32;
+ let Inst{4-0} = Rx32{4-0};
}
-class Enc_8202458 : OpcodeHexagon {
- bits <2> Pu4;
- let Inst{6-5} = Pu4{1-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
+class Enc_5138b3 : OpcodeHexagon {
+ bits <5> Vu32;
+ let Inst{12-8} = Vu32{4-0};
bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
+ let Inst{20-16} = Rt32{4-0};
+ bits <5> Vx32;
+ let Inst{4-0} = Vx32{4-0};
}
-class Enc_8577055 : OpcodeHexagon {
- bits <11> Ii;
- let Inst{21-20} = Ii{10-9};
- let Inst{7-1} = Ii{8-2};
+class Enc_84d359 : OpcodeHexagon {
+ bits <4> Ii;
+ let Inst{3-0} = Ii{3-0};
bits <4> Rs16;
- let Inst{19-16} = Rs16{3-0};
- bits <5> n1;
- let Inst{28-28} = n1{4-4};
- let Inst{25-23} = n1{3-1};
- let Inst{8-8} = n1{0-0};
+ let Inst{7-4} = Rs16{3-0};
}
-class Enc_1409050 : OpcodeHexagon {
+class Enc_e07374 : OpcodeHexagon {
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
- bits <5> Rxx32;
- let Inst{4-0} = Rxx32{4-0};
-}
-class Enc_7466005 : OpcodeHexagon {
- bits <5> Gs32;
- let Inst{20-16} = Gs32{4-0};
+ bits <5> Rtt32;
+ let Inst{12-8} = Rtt32{4-0};
bits <5> Rd32;
let Inst{4-0} = Rd32{4-0};
}
-class Enc_2380082 : OpcodeHexagon {
- bits <5> Ii;
- let Inst{12-8} = Ii{4-0};
- bits <5> Rss32;
- let Inst{20-16} = Rss32{4-0};
+class Enc_323f2d : OpcodeHexagon {
+ bits <6> II;
+ let Inst{11-8} = II{5-2};
+ let Inst{6-5} = II{1-0};
bits <5> Rd32;
let Inst{4-0} = Rd32{4-0};
+ bits <5> Re32;
+ let Inst{20-16} = Re32{4-0};
}
-class Enc_10067774 : OpcodeHexagon {
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
- bits <3> Nt8;
- let Inst{10-8} = Nt8{2-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_11000933 : OpcodeHexagon {
+class Enc_1a9974 : OpcodeHexagon {
bits <2> Ii;
let Inst{13-13} = Ii{1-1};
let Inst{7-7} = Ii{0-0};
@@ -3393,55 +2708,66 @@ class Enc_11000933 : OpcodeHexagon {
let Inst{20-16} = Rs32{4-0};
bits <5> Ru32;
let Inst{12-8} = Ru32{4-0};
- bits <3> Nt8;
- let Inst{2-0} = Nt8{2-0};
+ bits <5> Rtt32;
+ let Inst{4-0} = Rtt32{4-0};
}
-class Enc_13201267 : OpcodeHexagon {
- bits <5> Ii;
- let Inst{12-8} = Ii{4-0};
- bits <5> Rss32;
- let Inst{20-16} = Rss32{4-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
+class Enc_1de724 : OpcodeHexagon {
+ bits <11> Ii;
+ let Inst{21-20} = Ii{10-9};
+ let Inst{7-1} = Ii{8-2};
+ bits <4> Rs16;
+ let Inst{19-16} = Rs16{3-0};
+ bits <4> n1;
+ let Inst{28-28} = n1{3-3};
+ let Inst{24-22} = n1{2-0};
}
-class Enc_1989309 : OpcodeHexagon {
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
- bits <5> Vvv32;
- let Inst{4-0} = Vvv32{4-0};
+class Enc_dd766a : OpcodeHexagon {
+ bits <5> Vu32;
+ let Inst{12-8} = Vu32{4-0};
+ bits <5> Vdd32;
+ let Inst{4-0} = Vdd32{4-0};
+}
+class Enc_0b51ce : OpcodeHexagon {
+ bits <3> Ii;
+ let Inst{10-8} = Ii{2-0};
+ bits <2> Qv4;
+ let Inst{12-11} = Qv4{1-0};
+ bits <5> Vs32;
+ let Inst{4-0} = Vs32{4-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
}
-class Enc_9082775 : OpcodeHexagon {
+class Enc_b4e6cf : OpcodeHexagon {
bits <10> Ii;
let Inst{21-21} = Ii{9-9};
let Inst{13-5} = Ii{8-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
+ bits <5> Ru32;
+ let Inst{4-0} = Ru32{4-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
}
-class Enc_8065534 : OpcodeHexagon {
- bits <4> Ii;
+class Enc_44215c : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{17-16} = Ii{5-4};
let Inst{6-3} = Ii{3-0};
bits <2> Pv4;
let Inst{1-0} = Pv4{1-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
+ bits <3> Nt8;
+ let Inst{10-8} = Nt8{2-0};
}
-class Enc_4631106 : OpcodeHexagon {
- bits <2> Ps4;
- let Inst{17-16} = Ps4{1-0};
+class Enc_a21d47 : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{10-5} = Ii{5-0};
bits <2> Pt4;
- let Inst{9-8} = Pt4{1-0};
- bits <2> Pu4;
- let Inst{7-6} = Pu4{1-0};
- bits <2> Pd4;
- let Inst{1-0} = Pd4{1-0};
+ let Inst{12-11} = Pt4{1-0};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
}
-class Enc_11065510 : OpcodeHexagon {
- bits <5> Ii;
- let Inst{6-3} = Ii{4-1};
+class Enc_cc449f : OpcodeHexagon {
+ bits <4> Ii;
+ let Inst{6-3} = Ii{3-0};
bits <2> Pv4;
let Inst{1-0} = Pv4{1-0};
bits <5> Rt32;
@@ -3449,70 +2775,7 @@ class Enc_11065510 : OpcodeHexagon {
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_6673186 : OpcodeHexagon {
- bits <13> Ii;
- let Inst{26-25} = Ii{12-11};
- let Inst{13-13} = Ii{10-10};
- let Inst{7-0} = Ii{9-2};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
-}
-class Enc_8498433 : OpcodeHexagon {
- bits <2> Pv4;
- let Inst{12-11} = Pv4{1-0};
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
- bits <3> Os8;
- let Inst{2-0} = Os8{2-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_4395009 : OpcodeHexagon {
- bits <7> Ii;
- bits <2> Pv4;
- let Inst{12-11} = Pv4{1-0};
- bits <5> Vs32;
- let Inst{4-0} = Vs32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_10926598 : OpcodeHexagon {
- bits <5> Vuu32;
- let Inst{12-8} = Vuu32{4-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <5> Vxx32;
- let Inst{7-3} = Vxx32{4-0};
-}
-class Enc_7606379 : OpcodeHexagon {
- bits <2> Pu4;
- let Inst{6-5} = Pu4{1-0};
- bits <5> Rss32;
- let Inst{20-16} = Rss32{4-0};
- bits <5> Rtt32;
- let Inst{12-8} = Rtt32{4-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
-}
-class Enc_8131399 : OpcodeHexagon {
- bits <6> II;
- let Inst{5-0} = II{5-0};
- bits <5> Rtt32;
- let Inst{12-8} = Rtt32{4-0};
- bits <5> Re32;
- let Inst{20-16} = Re32{4-0};
-}
-class Enc_11522288 : OpcodeHexagon {
- bits <8> Ii;
- let Inst{12-5} = Ii{7-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <5> Rx32;
- let Inst{4-0} = Rx32{4-0};
-}
-class Enc_114098 : OpcodeHexagon {
+class Enc_645d54 : OpcodeHexagon {
bits <2> Ii;
let Inst{13-13} = Ii{1-1};
let Inst{5-5} = Ii{0-0};
@@ -3523,47 +2786,29 @@ class Enc_114098 : OpcodeHexagon {
bits <5> Rdd32;
let Inst{4-0} = Rdd32{4-0};
}
-class Enc_5654851 : OpcodeHexagon {
- bits <5> Ii;
- let Inst{12-8} = Ii{4-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
+class Enc_667b39 : OpcodeHexagon {
+ bits <5> Css32;
+ let Inst{20-16} = Css32{4-0};
bits <5> Rdd32;
let Inst{4-0} = Rdd32{4-0};
}
-class Enc_12023037 : OpcodeHexagon {
- bits <2> Ps4;
- let Inst{6-5} = Ps4{1-0};
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Vd32;
- let Inst{4-0} = Vd32{4-0};
-}
-class Enc_176263 : OpcodeHexagon {
- bits <8> Ii;
- let Inst{9-4} = Ii{7-2};
- bits <4> Rd16;
- let Inst{3-0} = Rd16{3-0};
-}
-class Enc_6130414 : OpcodeHexagon {
- bits <16> Ii;
- let Inst{23-22} = Ii{15-14};
- let Inst{13-0} = Ii{13-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_631197 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{13-8} = Ii{5-0};
- bits <6> II;
- let Inst{23-21} = II{5-3};
- let Inst{7-5} = II{2-0};
+class Enc_927852 : OpcodeHexagon {
bits <5> Rss32;
let Inst{20-16} = Rss32{4-0};
- bits <5> Rxx32;
- let Inst{4-0} = Rxx32{4-0};
+ bits <5> Rt32;
+ let Inst{12-8} = Rt32{4-0};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
+}
+class Enc_163a3c : OpcodeHexagon {
+ bits <7> Ii;
+ let Inst{12-7} = Ii{6-1};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+ bits <5> Rt32;
+ let Inst{4-0} = Rt32{4-0};
}
-class Enc_16214129 : OpcodeHexagon {
+class Enc_b087ac : OpcodeHexagon {
bits <5> Vu32;
let Inst{12-8} = Vu32{4-0};
bits <5> Rt32;
@@ -3571,507 +2816,412 @@ class Enc_16214129 : OpcodeHexagon {
bits <5> Vd32;
let Inst{4-0} = Vd32{4-0};
}
-class Enc_8333157 : OpcodeHexagon {
- bits <5> Rss32;
- let Inst{20-16} = Rss32{4-0};
- bits <5> Rtt32;
- let Inst{12-8} = Rtt32{4-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
-}
-class Enc_4834775 : OpcodeHexagon {
- bits <6> II;
- let Inst{13-8} = II{5-0};
+class Enc_b1e1fb : OpcodeHexagon {
bits <11> Ii;
let Inst{21-20} = Ii{10-9};
let Inst{7-1} = Ii{8-2};
- bits <4> Rd16;
- let Inst{19-16} = Rd16{3-0};
+ bits <4> Rs16;
+ let Inst{19-16} = Rs16{3-0};
+ bits <5> n1;
+ let Inst{28-28} = n1{4-4};
+ let Inst{25-23} = n1{3-1};
+ let Inst{8-8} = n1{0-0};
}
-class Enc_16601956 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
+class Enc_1f19b5 : OpcodeHexagon {
+ bits <5> Ii;
+ let Inst{9-5} = Ii{4-0};
+ bits <5> Rss32;
+ let Inst{20-16} = Rss32{4-0};
+ bits <2> Pd4;
+ let Inst{1-0} = Pd4{1-0};
+}
+class Enc_b8c967 : OpcodeHexagon {
+ bits <8> Ii;
+ let Inst{12-5} = Ii{7-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
bits <5> Rd32;
let Inst{4-0} = Rd32{4-0};
}
-class Enc_15946706 : OpcodeHexagon {
- bits <2> Ii;
- let Inst{6-5} = Ii{1-0};
- bits <3> Rdd8;
- let Inst{2-0} = Rdd8{2-0};
-}
-class Enc_6923828 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{13-13} = Ii{3-3};
- let Inst{10-8} = Ii{2-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <5> Vs32;
- let Inst{4-0} = Vs32{4-0};
-}
-class Enc_1332717 : OpcodeHexagon {
+class Enc_fb6577 : OpcodeHexagon {
bits <2> Pu4;
- let Inst{6-5} = Pu4{1-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
+ let Inst{9-8} = Pu4{1-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
bits <5> Rd32;
let Inst{4-0} = Rd32{4-0};
}
-class Enc_1786883 : OpcodeHexagon {
- bits <5> Rss32;
- let Inst{20-16} = Rss32{4-0};
- bits <6> Sdd64;
- let Inst{5-0} = Sdd64{5-0};
-}
-class Enc_14303394 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{8-5} = Ii{5-2};
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_9282127 : OpcodeHexagon {
- bits <8> Ii;
- let Inst{12-7} = Ii{7-2};
- bits <8> II;
- let Inst{13-13} = II{7-7};
- let Inst{6-0} = II{6-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
-}
-class Enc_2813446 : OpcodeHexagon {
+class Enc_2bae10 : OpcodeHexagon {
bits <4> Ii;
- let Inst{6-3} = Ii{3-0};
- bits <2> Pv4;
- let Inst{1-0} = Pv4{1-0};
- bits <3> Nt8;
- let Inst{10-8} = Nt8{2-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_364753 : OpcodeHexagon {
- bits <11> Ii;
- let Inst{21-20} = Ii{10-9};
- let Inst{7-1} = Ii{8-2};
- bits <3> Ns8;
- let Inst{18-16} = Ns8{2-0};
- bits <4> n1;
- let Inst{29-29} = n1{3-3};
- let Inst{26-25} = n1{2-1};
- let Inst{23-23} = n1{0-0};
+ let Inst{10-8} = Ii{3-1};
+ bits <4> Rs16;
+ let Inst{7-4} = Rs16{3-0};
+ bits <4> Rd16;
+ let Inst{3-0} = Rd16{3-0};
}
-class Enc_12477789 : OpcodeHexagon {
- bits <15> Ii;
- let Inst{21-21} = Ii{14-14};
- let Inst{13-13} = Ii{13-13};
- let Inst{11-1} = Ii{12-2};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
+class Enc_c4dc92 : OpcodeHexagon {
+ bits <2> Qv4;
+ let Inst{23-22} = Qv4{1-0};
+ bits <5> Vu32;
+ let Inst{12-8} = Vu32{4-0};
+ bits <5> Vd32;
+ let Inst{4-0} = Vd32{4-0};
}
-class Enc_44555 : OpcodeHexagon {
+class Enc_03833b : OpcodeHexagon {
+ bits <5> Rss32;
+ let Inst{20-16} = Rss32{4-0};
bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <5> Vd32;
- let Inst{7-3} = Vd32{4-0};
+ let Inst{12-8} = Rt32{4-0};
+ bits <2> Pd4;
+ let Inst{1-0} = Pd4{1-0};
}
-class Enc_8497723 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{13-8} = Ii{5-0};
+class Enc_dbd70c : OpcodeHexagon {
bits <5> Rss32;
let Inst{20-16} = Rss32{4-0};
- bits <5> Rxx32;
- let Inst{4-0} = Rxx32{4-0};
+ bits <5> Rtt32;
+ let Inst{12-8} = Rtt32{4-0};
+ bits <2> Pu4;
+ let Inst{6-5} = Pu4{1-0};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
}
-class Enc_4359901 : OpcodeHexagon {
+class Enc_f6fe0b : OpcodeHexagon {
bits <11> Ii;
let Inst{21-20} = Ii{10-9};
let Inst{7-1} = Ii{8-2};
- bits <3> Ns8;
- let Inst{18-16} = Ns8{2-0};
- bits <4> n1;
- let Inst{29-29} = n1{3-3};
- let Inst{26-25} = n1{2-1};
- let Inst{22-22} = n1{0-0};
+ bits <4> Rs16;
+ let Inst{19-16} = Rs16{3-0};
+ bits <6> n1;
+ let Inst{28-28} = n1{5-5};
+ let Inst{24-22} = n1{4-2};
+ let Inst{13-13} = n1{1-1};
+ let Inst{8-8} = n1{0-0};
}
-class Enc_11271630 : OpcodeHexagon {
- bits <7> Ii;
- let Inst{6-3} = Ii{6-3};
- bits <5> Rtt32;
- let Inst{12-8} = Rtt32{4-0};
+class Enc_9e2e1c : OpcodeHexagon {
+ bits <5> Ii;
+ let Inst{8-5} = Ii{4-1};
+ bits <1> Mu2;
+ let Inst{13-13} = Mu2{0-0};
+ bits <5> Ryy32;
+ let Inst{4-0} = Ryy32{4-0};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_10501894 : OpcodeHexagon {
+class Enc_8df4be : OpcodeHexagon {
+ bits <17> Ii;
+ let Inst{26-25} = Ii{16-15};
+ let Inst{20-16} = Ii{14-10};
+ let Inst{13-5} = Ii{9-1};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
+}
+class Enc_66bce1 : OpcodeHexagon {
+ bits <11> Ii;
+ let Inst{21-20} = Ii{10-9};
+ let Inst{7-1} = Ii{8-2};
bits <4> Rs16;
- let Inst{7-4} = Rs16{3-0};
- bits <3> Rdd8;
- let Inst{2-0} = Rdd8{2-0};
+ let Inst{19-16} = Rs16{3-0};
+ bits <4> Rd16;
+ let Inst{11-8} = Rd16{3-0};
+}
+class Enc_b8309d : OpcodeHexagon {
+ bits <9> Ii;
+ let Inst{8-3} = Ii{8-3};
+ bits <3> Rtt8;
+ let Inst{2-0} = Rtt8{2-0};
}
-class Enc_9768377 : OpcodeHexagon {
+class Enc_5e8512 : OpcodeHexagon {
+ bits <5> Vu32;
+ let Inst{12-8} = Vu32{4-0};
bits <5> Rt32;
let Inst{20-16} = Rt32{4-0};
- bits <5> Vd32;
- let Inst{4-0} = Vd32{4-0};
-}
-class Enc_16268019 : OpcodeHexagon {
- bits <5> Vuu32;
- let Inst{20-16} = Vuu32{4-0};
- bits <5> Vvv32;
- let Inst{12-8} = Vvv32{4-0};
- bits <5> Vdd32;
- let Inst{7-3} = Vdd32{4-0};
+ bits <5> Vxx32;
+ let Inst{4-0} = Vxx32{4-0};
}
-class Enc_8814718 : OpcodeHexagon {
- bits <18> Ii;
- let Inst{26-25} = Ii{17-16};
- let Inst{20-16} = Ii{15-11};
- let Inst{13-5} = Ii{10-2};
+class Enc_4f677b : OpcodeHexagon {
+ bits <2> Ii;
+ let Inst{13-13} = Ii{1-1};
+ let Inst{7-7} = Ii{0-0};
+ bits <6> II;
+ let Inst{11-8} = II{5-2};
+ let Inst{6-5} = II{1-0};
+ bits <5> Rt32;
+ let Inst{20-16} = Rt32{4-0};
bits <5> Rd32;
let Inst{4-0} = Rd32{4-0};
}
-class Enc_6212930 : OpcodeHexagon {
+class Enc_3d920a : OpcodeHexagon {
bits <6> Ii;
let Inst{8-5} = Ii{5-2};
- bits <2> Pt4;
- let Inst{10-9} = Pt4{1-0};
bits <5> Rd32;
let Inst{4-0} = Rd32{4-0};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_5462762 : OpcodeHexagon {
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
+class Enc_e83554 : OpcodeHexagon {
+ bits <5> Ii;
+ let Inst{8-5} = Ii{4-1};
bits <1> Mu2;
let Inst{13-13} = Mu2{0-0};
- bits <5> Vv32;
- let Inst{12-8} = Vv32{4-0};
- bits <5> Vw32;
- let Inst{4-0} = Vw32{4-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
}
-class Enc_6154421 : OpcodeHexagon {
- bits <7> Ii;
- let Inst{13-13} = Ii{6-6};
- let Inst{7-3} = Ii{5-1};
+class Enc_ed48be : OpcodeHexagon {
+ bits <2> Ii;
+ let Inst{6-5} = Ii{1-0};
+ bits <3> Rdd8;
+ let Inst{2-0} = Rdd8{2-0};
+}
+class Enc_f8c1c4 : OpcodeHexagon {
bits <2> Pv4;
- let Inst{1-0} = Pv4{1-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <3> Nt8;
- let Inst{10-8} = Nt8{2-0};
+ let Inst{12-11} = Pv4{1-0};
+ bits <1> Mu2;
+ let Inst{13-13} = Mu2{0-0};
+ bits <5> Vd32;
+ let Inst{4-0} = Vd32{4-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
}
-class Enc_8940892 : OpcodeHexagon {
+class Enc_1aa186 : OpcodeHexagon {
bits <5> Rss32;
let Inst{20-16} = Rss32{4-0};
bits <5> Rt32;
let Inst{12-8} = Rt32{4-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
+ bits <5> Rxx32;
+ let Inst{4-0} = Rxx32{4-0};
}
-class Enc_3531000 : OpcodeHexagon {
- bits <7> Ii;
- let Inst{11-5} = Ii{6-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <2> Pd4;
- let Inst{1-0} = Pd4{1-0};
+class Enc_134437 : OpcodeHexagon {
+ bits <2> Qs4;
+ let Inst{9-8} = Qs4{1-0};
+ bits <2> Qt4;
+ let Inst{23-22} = Qt4{1-0};
+ bits <2> Qd4;
+ let Inst{1-0} = Qd4{1-0};
}
-class Enc_14311138 : OpcodeHexagon {
- bits <5> Vuu32;
- let Inst{20-16} = Vuu32{4-0};
- bits <5> Vd32;
- let Inst{7-3} = Vd32{4-0};
+class Enc_97d666 : OpcodeHexagon {
+ bits <4> Rs16;
+ let Inst{7-4} = Rs16{3-0};
+ bits <4> Rd16;
+ let Inst{3-0} = Rd16{3-0};
}
-class Enc_2216485 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{22-21} = Ii{5-4};
- let Inst{13-13} = Ii{3-3};
- let Inst{7-5} = Ii{2-0};
+class Enc_f82eaf : OpcodeHexagon {
+ bits <8> Ii;
+ let Inst{10-5} = Ii{7-2};
+ bits <2> Pt4;
+ let Inst{12-11} = Pt4{1-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
bits <5> Rd32;
let Inst{4-0} = Rd32{4-0};
}
-class Enc_12395768 : OpcodeHexagon {
- bits <16> Ii;
- let Inst{26-25} = Ii{15-14};
- let Inst{20-16} = Ii{13-9};
- let Inst{13-13} = Ii{8-8};
- let Inst{7-0} = Ii{7-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
-}
-class Enc_11047413 : OpcodeHexagon {
- bits <6> II;
- let Inst{11-8} = II{5-2};
- let Inst{6-5} = II{1-0};
- bits <5> Ryy32;
- let Inst{4-0} = Ryy32{4-0};
- bits <5> Re32;
- let Inst{20-16} = Re32{4-0};
-}
-class Enc_1256611 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
-}
-class Enc_7884306 : OpcodeHexagon {
- bits <8> Ii;
- let Inst{8-4} = Ii{7-3};
-}
-class Enc_11244923 : OpcodeHexagon {
- bits <3> Ii;
- let Inst{10-8} = Ii{2-0};
- bits <3> Os8;
- let Inst{2-0} = Os8{2-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_8612939 : OpcodeHexagon {
+class Enc_69d63b : OpcodeHexagon {
bits <11> Ii;
let Inst{21-20} = Ii{10-9};
let Inst{7-1} = Ii{8-2};
bits <3> Ns8;
let Inst{18-16} = Ns8{2-0};
- bits <5> n1;
- let Inst{29-29} = n1{4-4};
- let Inst{26-25} = n1{3-2};
- let Inst{22-22} = n1{1-1};
- let Inst{13-13} = n1{0-0};
}
-class Enc_16355964 : OpcodeHexagon {
- bits <8> Ii;
- let Inst{12-5} = Ii{7-0};
+class Enc_f79415 : OpcodeHexagon {
+ bits <2> Ii;
+ let Inst{13-13} = Ii{1-1};
+ let Inst{6-6} = Ii{0-0};
+ bits <6> II;
+ let Inst{5-0} = II{5-0};
+ bits <5> Ru32;
+ let Inst{20-16} = Ru32{4-0};
+ bits <5> Rtt32;
+ let Inst{12-8} = Rtt32{4-0};
+}
+class Enc_ce6828 : OpcodeHexagon {
+ bits <14> Ii;
+ let Inst{26-25} = Ii{13-12};
+ let Inst{13-13} = Ii{11-11};
+ let Inst{7-0} = Ii{10-3};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
-}
-class Enc_12616482 : OpcodeHexagon {
- bits <6> II;
- let Inst{11-8} = II{5-2};
- let Inst{6-5} = II{1-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
- bits <5> Re32;
- let Inst{20-16} = Re32{4-0};
+ bits <5> Rtt32;
+ let Inst{12-8} = Rtt32{4-0};
}
-class Enc_5915771 : OpcodeHexagon {
+class Enc_800e04 : OpcodeHexagon {
bits <11> Ii;
let Inst{21-20} = Ii{10-9};
let Inst{7-1} = Ii{8-2};
bits <4> Rs16;
let Inst{19-16} = Rs16{3-0};
- bits <5> n1;
- let Inst{28-28} = n1{4-4};
- let Inst{24-22} = n1{3-1};
- let Inst{8-8} = n1{0-0};
-}
-class Enc_14459927 : OpcodeHexagon {
- bits <3> Ii;
- let Inst{10-8} = Ii{2-0};
- bits <2> Pv4;
- let Inst{12-11} = Pv4{1-0};
- bits <5> Vs32;
- let Inst{4-0} = Vs32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
+ bits <6> n1;
+ let Inst{28-28} = n1{5-5};
+ let Inst{25-22} = n1{4-1};
+ let Inst{13-13} = n1{0-0};
}
-class Enc_7504828 : OpcodeHexagon {
- bits <10> Ii;
- let Inst{21-21} = Ii{9-9};
- let Inst{13-5} = Ii{8-0};
- bits <5> Ru32;
- let Inst{4-0} = Ru32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
+class Enc_ad1831 : OpcodeHexagon {
+ bits <16> Ii;
+ let Inst{26-25} = Ii{15-14};
+ let Inst{20-16} = Ii{13-9};
+ let Inst{13-13} = Ii{8-8};
+ let Inst{7-0} = Ii{7-0};
+ bits <3> Nt8;
+ let Inst{10-8} = Nt8{2-0};
}
-class Enc_14209223 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{20-16} = Vu32{4-0};
- bits <5> Vdd32;
- let Inst{7-3} = Vdd32{4-0};
+class Enc_0fa531 : OpcodeHexagon {
+ bits <15> Ii;
+ let Inst{21-21} = Ii{14-14};
+ let Inst{13-13} = Ii{13-13};
+ let Inst{11-1} = Ii{12-2};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
}
-class Enc_3931661 : OpcodeHexagon {
+class Enc_7eaeb6 : OpcodeHexagon {
bits <6> Ii;
- let Inst{8-5} = Ii{5-2};
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
+ let Inst{6-3} = Ii{5-2};
+ bits <2> Pv4;
+ let Inst{1-0} = Pv4{1-0};
+ bits <5> Rt32;
+ let Inst{12-8} = Rt32{4-0};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_13606251 : OpcodeHexagon {
+class Enc_f55a0c : OpcodeHexagon {
bits <6> Ii;
let Inst{11-8} = Ii{5-2};
bits <4> Rs16;
let Inst{7-4} = Rs16{3-0};
- bits <4> Rd16;
- let Inst{3-0} = Rd16{3-0};
-}
-class Enc_11475992 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <5> Vdd32;
- let Inst{7-3} = Vdd32{4-0};
-}
-class Enc_13133231 : OpcodeHexagon {
- bits <5> Rss32;
- let Inst{20-16} = Rss32{4-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
+ bits <4> Rt16;
+ let Inst{3-0} = Rt16{3-0};
}
-class Enc_9959498 : OpcodeHexagon {
- bits <8> Ii;
- let Inst{22-21} = Ii{7-6};
- let Inst{13-13} = Ii{5-5};
- let Inst{7-5} = Ii{4-2};
- bits <5> Ru32;
- let Inst{4-0} = Ru32{4-0};
+class Enc_f20719 : OpcodeHexagon {
+ bits <7> Ii;
+ let Inst{12-7} = Ii{6-1};
+ bits <6> II;
+ let Inst{13-13} = II{5-5};
+ let Inst{4-0} = II{4-0};
+ bits <2> Pv4;
+ let Inst{6-5} = Pv4{1-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Rd32;
- let Inst{12-8} = Rd32{4-0};
}
-class Enc_8919369 : OpcodeHexagon {
+class Enc_eafd18 : OpcodeHexagon {
+ bits <5> II;
+ let Inst{12-8} = II{4-0};
bits <11> Ii;
let Inst{21-20} = Ii{10-9};
let Inst{7-1} = Ii{8-2};
- bits <4> Rs16;
- let Inst{19-16} = Rs16{3-0};
- bits <5> n1;
- let Inst{28-28} = n1{4-4};
- let Inst{24-23} = n1{3-2};
- let Inst{13-13} = n1{1-1};
- let Inst{8-8} = n1{0-0};
-}
-class Enc_2968094 : OpcodeHexagon {
- bits <7> Ii;
- let Inst{11-5} = Ii{6-0};
- bits <5> Rss32;
- let Inst{20-16} = Rss32{4-0};
- bits <2> Pd4;
- let Inst{1-0} = Pd4{1-0};
+ bits <3> Ns8;
+ let Inst{18-16} = Ns8{2-0};
}
-class Enc_4813442 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{6-3} = Ii{5-2};
- bits <2> Pv4;
- let Inst{1-0} = Pv4{1-0};
- bits <3> Nt8;
- let Inst{10-8} = Nt8{2-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
+class Enc_7b523d : OpcodeHexagon {
+ bits <5> Vu32;
+ let Inst{12-8} = Vu32{4-0};
+ bits <5> Vv32;
+ let Inst{23-19} = Vv32{4-0};
+ bits <3> Rt8;
+ let Inst{18-16} = Rt8{2-0};
+ bits <5> Vxx32;
+ let Inst{4-0} = Vxx32{4-0};
}
-class Enc_4684887 : OpcodeHexagon {
- bits <11> Ii;
- let Inst{21-20} = Ii{10-9};
- let Inst{7-1} = Ii{8-2};
- bits <4> Rs16;
- let Inst{19-16} = Rs16{3-0};
- bits <4> n1;
- let Inst{28-28} = n1{3-3};
- let Inst{25-23} = n1{2-0};
+class Enc_47ef61 : OpcodeHexagon {
+ bits <3> Ii;
+ let Inst{7-5} = Ii{2-0};
+ bits <5> Rt32;
+ let Inst{12-8} = Rt32{4-0};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
}
-class Enc_15606259 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{11-8} = Ii{3-0};
- bits <4> Rs16;
- let Inst{7-4} = Rs16{3-0};
- bits <4> Rd16;
- let Inst{3-0} = Rd16{3-0};
+class Enc_cc857d : OpcodeHexagon {
+ bits <5> Vuu32;
+ let Inst{12-8} = Vuu32{4-0};
+ bits <5> Rt32;
+ let Inst{20-16} = Rt32{4-0};
+ bits <5> Vx32;
+ let Inst{4-0} = Vx32{4-0};
}
-class Enc_2268028 : OpcodeHexagon {
- bits <3> Qtt8;
- let Inst{10-8} = Qtt8{2-0};
- bits <3> Qdd8;
- let Inst{5-3} = Qdd8{2-0};
+class Enc_7fa7f6 : OpcodeHexagon {
+ bits <6> II;
+ let Inst{11-8} = II{5-2};
+ let Inst{6-5} = II{1-0};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
+ bits <5> Re32;
+ let Inst{20-16} = Re32{4-0};
}
-class Enc_13430430 : OpcodeHexagon {
+class Enc_0f8bab : OpcodeHexagon {
bits <5> Vu32;
let Inst{12-8} = Vu32{4-0};
bits <5> Rt32;
let Inst{20-16} = Rt32{4-0};
- bits <5> Vd32;
- let Inst{7-3} = Vd32{4-0};
- bits <3> Qxx8;
- let Inst{2-0} = Qxx8{2-0};
+ bits <2> Qd4;
+ let Inst{1-0} = Qd4{1-0};
}
-class Enc_13336212 : OpcodeHexagon {
- bits <4> Rd16;
- let Inst{3-0} = Rd16{3-0};
- bits <1> n1;
- let Inst{9-9} = n1{0-0};
+class Enc_7eb485 : OpcodeHexagon {
+ bits <2> Ii;
+ let Inst{13-13} = Ii{1-1};
+ let Inst{6-6} = Ii{0-0};
+ bits <6> II;
+ let Inst{5-0} = II{5-0};
+ bits <5> Ru32;
+ let Inst{20-16} = Ru32{4-0};
+ bits <3> Nt8;
+ let Inst{10-8} = Nt8{2-0};
}
-class Enc_15008287 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{20-16} = Vu32{4-0};
- bits <3> Rt8;
- let Inst{2-0} = Rt8{2-0};
- bits <5> Vx32;
- let Inst{7-3} = Vx32{4-0};
- bits <5> Vy32;
- let Inst{12-8} = Vy32{4-0};
+class Enc_864a5a : OpcodeHexagon {
+ bits <9> Ii;
+ let Inst{12-8} = Ii{8-4};
+ let Inst{4-3} = Ii{3-2};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
}
-class Enc_4897205 : OpcodeHexagon {
- bits <2> Qs4;
- let Inst{9-8} = Qs4{1-0};
- bits <2> Qd4;
- let Inst{1-0} = Qd4{1-0};
+class Enc_c2b48e : OpcodeHexagon {
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+ bits <5> Rt32;
+ let Inst{12-8} = Rt32{4-0};
+ bits <2> Pd4;
+ let Inst{1-0} = Pd4{1-0};
}
-class Enc_8038806 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{11-8} = Ii{3-0};
+class Enc_8c6530 : OpcodeHexagon {
+ bits <5> Rtt32;
+ let Inst{12-8} = Rtt32{4-0};
bits <5> Rss32;
let Inst{20-16} = Rss32{4-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
-}
-class Enc_12669374 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Vxx32;
- let Inst{4-0} = Vxx32{4-0};
+ bits <2> Pu4;
+ let Inst{6-5} = Pu4{1-0};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
}
-class Enc_971347 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{8-5} = Ii{3-0};
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
- bits <5> Ryy32;
- let Inst{4-0} = Ryy32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
+class Enc_448f7f : OpcodeHexagon {
+ bits <11> Ii;
+ let Inst{26-25} = Ii{10-9};
+ let Inst{13-13} = Ii{8-8};
+ let Inst{7-0} = Ii{7-0};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+ bits <5> Rt32;
+ let Inst{12-8} = Rt32{4-0};
}
-class Enc_1997594 : OpcodeHexagon {
+class Enc_da8d43 : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{13-13} = Ii{5-5};
+ let Inst{7-3} = Ii{4-0};
+ bits <2> Pv4;
+ let Inst{1-0} = Pv4{1-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
bits <5> Rt32;
let Inst{12-8} = Rt32{4-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
}
-class Enc_11940513 : OpcodeHexagon {
+class Enc_a6ce9c : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{3-0} = Ii{5-2};
+ bits <4> Rs16;
+ let Inst{7-4} = Rs16{3-0};
+}
+class Enc_eca7c8 : OpcodeHexagon {
bits <2> Ii;
let Inst{13-13} = Ii{1-1};
let Inst{7-7} = Ii{0-0};
- bits <2> Pv4;
- let Inst{6-5} = Pv4{1-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
bits <5> Ru32;
@@ -4079,104 +3229,13 @@ class Enc_11940513 : OpcodeHexagon {
bits <5> Rt32;
let Inst{4-0} = Rt32{4-0};
}
-class Enc_2735552 : OpcodeHexagon {
+class Enc_4b39e4 : OpcodeHexagon {
bits <3> Ii;
- let Inst{10-8} = Ii{2-0};
- bits <2> Pv4;
- let Inst{12-11} = Pv4{1-0};
- bits <3> Os8;
- let Inst{2-0} = Os8{2-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_16410950 : OpcodeHexagon {
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
- bits <5> Vs32;
- let Inst{7-3} = Vs32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_6226085 : OpcodeHexagon {
- bits <5> Ii;
- let Inst{12-8} = Ii{4-0};
- bits <5> II;
- let Inst{22-21} = II{4-3};
- let Inst{7-5} = II{2-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
-}
-class Enc_14193700 : OpcodeHexagon {
- bits <6> II;
- let Inst{5-0} = II{5-0};
- bits <3> Nt8;
- let Inst{10-8} = Nt8{2-0};
- bits <5> Re32;
- let Inst{20-16} = Re32{4-0};
-}
-class Enc_15763937 : OpcodeHexagon {
- bits <11> Ii;
- let Inst{21-20} = Ii{10-9};
- let Inst{7-1} = Ii{8-2};
- bits <3> Ns8;
- let Inst{18-16} = Ns8{2-0};
- bits <6> n1;
- let Inst{29-29} = n1{5-5};
- let Inst{26-25} = n1{4-3};
- let Inst{23-22} = n1{2-1};
- let Inst{13-13} = n1{0-0};
-}
-class Enc_2492727 : OpcodeHexagon {
- bits <5> Rss32;
- let Inst{20-16} = Rss32{4-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
- bits <2> Pd4;
- let Inst{1-0} = Pd4{1-0};
-}
-class Enc_13425035 : OpcodeHexagon {
- bits <2> Qv4;
- let Inst{12-11} = Qv4{1-0};
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
- bits <5> Vs32;
- let Inst{4-0} = Vs32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_4135257 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{10-8} = Ii{3-1};
- bits <4> Rs16;
- let Inst{7-4} = Rs16{3-0};
- bits <4> Rd16;
- let Inst{3-0} = Rd16{3-0};
-}
-class Enc_14631806 : OpcodeHexagon {
+ let Inst{7-5} = Ii{2-0};
bits <5> Vu32;
let Inst{12-8} = Vu32{4-0};
+ bits <5> Vv32;
+ let Inst{20-16} = Vv32{4-0};
bits <5> Vdd32;
let Inst{4-0} = Vdd32{4-0};
}
-class Enc_12397062 : OpcodeHexagon {
- bits <3> Ii;
- let Inst{10-8} = Ii{2-0};
- bits <2> Qv4;
- let Inst{12-11} = Qv4{1-0};
- bits <5> Vs32;
- let Inst{4-0} = Vs32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_11959851 : OpcodeHexagon {
- bits <7> Ii;
- let Inst{6-3} = Ii{6-3};
- bits <2> Pv4;
- let Inst{1-0} = Pv4{1-0};
- bits <5> Rtt32;
- let Inst{12-8} = Rtt32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonDepInstrInfo.td b/contrib/llvm/lib/Target/Hexagon/HexagonDepInstrInfo.td
index d910d4af2191..2dc74632e9be 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonDepInstrInfo.td
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonDepInstrInfo.td
@@ -11,36 +11,39 @@ def A2_abs : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = abs($Rs32)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_4075554 {
+tc_94e6ffd9, TypeS_2op>, Enc_5e2823 {
let Inst{13-5} = 0b000000100;
let Inst{31-21} = 0b10001100100;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def A2_absp : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32),
"$Rdd32 = abs($Rss32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_13133231 {
+tc_94e6ffd9, TypeS_2op>, Enc_b9c5fb {
let Inst{13-5} = 0b000000110;
let Inst{31-21} = 0b10000000100;
+let prefersSlot3 = 1;
}
def A2_abssat : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = abs($Rs32):sat",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_4075554 {
+tc_94e6ffd9, TypeS_2op>, Enc_5e2823 {
let Inst{13-5} = 0b000000101;
let Inst{31-21} = 0b10001100100;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def A2_add : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = add($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_14071773, PredNewRel, ImmRegRel {
+tc_548f402d, TypeALU32_3op>, Enc_5ab2be, PredNewRel, ImmRegRel {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110011000;
@@ -56,145 +59,157 @@ def A2_addh_h16_hh : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = add($Rt32.h,$Rs32.h):<<16",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_8605375 {
+tc_bd16579e, TypeALU64>, Enc_bd6011 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101010;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def A2_addh_h16_hl : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = add($Rt32.h,$Rs32.l):<<16",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_8605375 {
+tc_bd16579e, TypeALU64>, Enc_bd6011 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101010;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def A2_addh_h16_lh : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = add($Rt32.l,$Rs32.h):<<16",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_8605375 {
+tc_bd16579e, TypeALU64>, Enc_bd6011 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101010;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def A2_addh_h16_ll : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = add($Rt32.l,$Rs32.l):<<16",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_8605375 {
+tc_bd16579e, TypeALU64>, Enc_bd6011 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101010;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def A2_addh_h16_sat_hh : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = add($Rt32.h,$Rs32.h):sat:<<16",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8605375 {
+tc_47ab9233, TypeALU64>, Enc_bd6011 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101010;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def A2_addh_h16_sat_hl : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = add($Rt32.h,$Rs32.l):sat:<<16",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8605375 {
+tc_47ab9233, TypeALU64>, Enc_bd6011 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101010;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def A2_addh_h16_sat_lh : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = add($Rt32.l,$Rs32.h):sat:<<16",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8605375 {
+tc_47ab9233, TypeALU64>, Enc_bd6011 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101010;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def A2_addh_h16_sat_ll : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = add($Rt32.l,$Rs32.l):sat:<<16",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8605375 {
+tc_47ab9233, TypeALU64>, Enc_bd6011 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101010;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def A2_addh_l16_hl : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = add($Rt32.l,$Rs32.h)",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_8605375 {
+tc_7ca2ea10, TypeALU64>, Enc_bd6011 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101000;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def A2_addh_l16_ll : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = add($Rt32.l,$Rs32.l)",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_8605375 {
+tc_7ca2ea10, TypeALU64>, Enc_bd6011 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101000;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def A2_addh_l16_sat_hl : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = add($Rt32.l,$Rs32.h):sat",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8605375 {
+tc_47ab9233, TypeALU64>, Enc_bd6011 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101000;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def A2_addh_l16_sat_ll : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = add($Rt32.l,$Rs32.l):sat",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8605375 {
+tc_47ab9233, TypeALU64>, Enc_bd6011 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101000;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def A2_addi : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, s32_0Imm:$Ii),
"$Rd32 = add($Rs32,#$Ii)",
-ALU32_ADDI_tc_1_SLOT0123, TypeALU32_ADDI>, Enc_11542684, PredNewRel, ImmRegRel {
+tc_548f402d, TypeALU32_ADDI>, Enc_cb9321, PredNewRel, ImmRegRel {
let Inst{31-28} = 0b1011;
let hasNewValue = 1;
let opNewValue = 0;
@@ -213,7 +228,7 @@ def A2_addp : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = add($Rss32,$Rtt32)",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_8333157 {
+tc_9c18c9a5, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011000;
@@ -224,10 +239,11 @@ def A2_addpsat : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = add($Rss32,$Rtt32):sat",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8333157 {
+tc_47ab9233, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011011;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
let isCommutable = 1;
}
@@ -235,12 +251,13 @@ def A2_addsat : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = add($Rs32,$Rt32):sat",
-ALU32_3op_tc_2_SLOT0123, TypeALU32_3op>, Enc_14071773 {
+tc_b0f50e3c, TypeALU32_3op>, Enc_5ab2be {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110110010;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
let InputType = "reg";
let isCommutable = 1;
@@ -249,32 +266,34 @@ def A2_addsp : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, DoubleRegs:$Rtt32),
"$Rdd32 = add($Rs32,$Rtt32)",
-ALU64_tc_1_SLOT23, TypeALU64> {
+tc_bd16579e, TypeALU64> {
let isPseudo = 1;
}
def A2_addsph : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = add($Rss32,$Rtt32):raw:hi",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_8333157 {
+tc_bd16579e, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011011;
+let prefersSlot3 = 1;
}
def A2_addspl : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = add($Rss32,$Rtt32):raw:lo",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_8333157 {
+tc_bd16579e, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011011;
+let prefersSlot3 = 1;
}
def A2_and : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = and($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_14071773, PredNewRel, ImmRegRel {
+tc_548f402d, TypeALU32_3op>, Enc_5ab2be, PredNewRel, ImmRegRel {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110001000;
@@ -290,7 +309,7 @@ def A2_andir : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, s32_0Imm:$Ii),
"$Rd32 = and($Rs32,#$Ii)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_13472494, ImmRegRel {
+tc_548f402d, TypeALU32_2op>, Enc_140c83, ImmRegRel {
let Inst{31-22} = 0b0111011000;
let hasNewValue = 1;
let opNewValue = 0;
@@ -306,7 +325,7 @@ def A2_andp : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = and($Rss32,$Rtt32)",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_8333157 {
+tc_9c18c9a5, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011111;
@@ -316,7 +335,7 @@ def A2_aslh : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = aslh($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_4075554, PredNewRel {
+tc_f16d5b17, TypeALU32_2op>, Enc_5e2823, PredNewRel {
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b01110000000;
let hasNewValue = 1;
@@ -328,7 +347,7 @@ def A2_asrh : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = asrh($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_4075554, PredNewRel {
+tc_f16d5b17, TypeALU32_2op>, Enc_5e2823, PredNewRel {
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b01110000001;
let hasNewValue = 1;
@@ -340,7 +359,7 @@ def A2_combine_hh : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = combine($Rt32.h,$Rs32.h)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_8605375 {
+tc_548f402d, TypeALU32_3op>, Enc_bd6011 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110011100;
@@ -352,7 +371,7 @@ def A2_combine_hl : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = combine($Rt32.h,$Rs32.l)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_8605375 {
+tc_548f402d, TypeALU32_3op>, Enc_bd6011 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110011101;
@@ -364,7 +383,7 @@ def A2_combine_lh : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = combine($Rt32.l,$Rs32.h)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_8605375 {
+tc_548f402d, TypeALU32_3op>, Enc_bd6011 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110011110;
@@ -376,7 +395,7 @@ def A2_combine_ll : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = combine($Rt32.l,$Rs32.l)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_8605375 {
+tc_548f402d, TypeALU32_3op>, Enc_bd6011 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110011111;
@@ -388,7 +407,7 @@ def A2_combineii : HInst<
(outs DoubleRegs:$Rdd32),
(ins s32_0Imm:$Ii, s8_0Imm:$II),
"$Rdd32 = combine(#$Ii,#$II)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_14007201 {
+tc_548f402d, TypeALU32_2op>, Enc_18c338 {
let Inst{31-23} = 0b011111000;
let isReMaterializable = 1;
let isAsCheapAsAMove = 1;
@@ -403,7 +422,7 @@ def A2_combinew : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = combine($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_1997594, PredNewRel {
+tc_548f402d, TypeALU32_3op>, Enc_be32a5, PredNewRel {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110101000;
@@ -415,87 +434,95 @@ def A2_max : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = max($Rs32,$Rt32)",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_14071773 {
+tc_47ab9233, TypeALU64>, Enc_5ab2be {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101110;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def A2_maxp : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = max($Rss32,$Rtt32)",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8333157 {
+tc_47ab9233, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011110;
+let prefersSlot3 = 1;
}
def A2_maxu : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = maxu($Rs32,$Rt32)",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_14071773 {
+tc_47ab9233, TypeALU64>, Enc_5ab2be {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101110;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def A2_maxup : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = maxu($Rss32,$Rtt32)",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8333157 {
+tc_47ab9233, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011110;
+let prefersSlot3 = 1;
}
def A2_min : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = min($Rt32,$Rs32)",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8605375 {
+tc_47ab9233, TypeALU64>, Enc_bd6011 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101101;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def A2_minp : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = min($Rtt32,$Rss32)",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_11687333 {
+tc_47ab9233, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011101;
+let prefersSlot3 = 1;
}
def A2_minu : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = minu($Rt32,$Rs32)",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8605375 {
+tc_47ab9233, TypeALU64>, Enc_bd6011 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101101;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def A2_minup : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = minu($Rtt32,$Rss32)",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_11687333 {
+tc_47ab9233, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011101;
+let prefersSlot3 = 1;
}
def A2_neg : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = neg($Rs32)",
-PSEUDO, TypeALU32_2op> {
+tc_f16d5b17, TypeALU32_2op> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -505,7 +532,7 @@ def A2_negp : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32),
"$Rdd32 = neg($Rss32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_13133231 {
+tc_b86c7e8b, TypeS_2op>, Enc_b9c5fb {
let Inst{13-5} = 0b000000101;
let Inst{31-21} = 0b10000000100;
}
@@ -513,18 +540,19 @@ def A2_negsat : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = neg($Rs32):sat",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_4075554 {
+tc_94e6ffd9, TypeS_2op>, Enc_5e2823 {
let Inst{13-5} = 0b000000110;
let Inst{31-21} = 0b10001100100;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def A2_nop : HInst<
(outs),
(ins),
"nop",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_0 {
+tc_e2c31426, TypeALU32_2op>, Enc_e3b0c4 {
let Inst{13-0} = 0b00000000000000;
let Inst{31-16} = 0b0111111100000000;
}
@@ -532,7 +560,7 @@ def A2_not : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = not($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op> {
+tc_f16d5b17, TypeALU32_2op> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -542,7 +570,7 @@ def A2_notp : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32),
"$Rdd32 = not($Rss32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_13133231 {
+tc_b86c7e8b, TypeS_2op>, Enc_b9c5fb {
let Inst{13-5} = 0b000000100;
let Inst{31-21} = 0b10000000100;
}
@@ -550,7 +578,7 @@ def A2_or : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = or($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_14071773, PredNewRel, ImmRegRel {
+tc_548f402d, TypeALU32_3op>, Enc_5ab2be, PredNewRel, ImmRegRel {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110001001;
@@ -566,7 +594,7 @@ def A2_orir : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, s32_0Imm:$Ii),
"$Rd32 = or($Rs32,#$Ii)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_13472494, ImmRegRel {
+tc_548f402d, TypeALU32_2op>, Enc_140c83, ImmRegRel {
let Inst{31-22} = 0b0111011010;
let hasNewValue = 1;
let opNewValue = 0;
@@ -582,7 +610,7 @@ def A2_orp : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = or($Rss32,$Rtt32)",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_8333157 {
+tc_9c18c9a5, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011111;
@@ -592,7 +620,7 @@ def A2_paddf : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32, IntRegs:$Rt32),
"if (!$Pu4) $Rd32 = add($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_9626139, PredNewRel, ImmRegRel {
+tc_1b6011fb, TypeALU32_3op>, Enc_ea4c54, PredNewRel, ImmRegRel {
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11111011000;
@@ -608,7 +636,7 @@ def A2_paddfnew : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32, IntRegs:$Rt32),
"if (!$Pu4.new) $Rd32 = add($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_9626139, PredNewRel, ImmRegRel {
+tc_28d296df, TypeALU32_3op>, Enc_ea4c54, PredNewRel, ImmRegRel {
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b11111011000;
@@ -625,7 +653,7 @@ def A2_paddif : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32, s32_0Imm:$Ii),
"if (!$Pu4) $Rd32 = add($Rs32,#$Ii)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_10568534, PredNewRel, ImmRegRel {
+tc_1b6011fb, TypeALU32_2op>, Enc_e38e1f, PredNewRel, ImmRegRel {
let Inst{13-13} = 0b0;
let Inst{31-23} = 0b011101001;
let isPredicated = 1;
@@ -645,7 +673,7 @@ def A2_paddifnew : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32, s32_0Imm:$Ii),
"if (!$Pu4.new) $Rd32 = add($Rs32,#$Ii)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_10568534, PredNewRel, ImmRegRel {
+tc_28d296df, TypeALU32_2op>, Enc_e38e1f, PredNewRel, ImmRegRel {
let Inst{13-13} = 0b1;
let Inst{31-23} = 0b011101001;
let isPredicated = 1;
@@ -666,7 +694,7 @@ def A2_paddit : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32, s32_0Imm:$Ii),
"if ($Pu4) $Rd32 = add($Rs32,#$Ii)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_10568534, PredNewRel, ImmRegRel {
+tc_1b6011fb, TypeALU32_2op>, Enc_e38e1f, PredNewRel, ImmRegRel {
let Inst{13-13} = 0b0;
let Inst{31-23} = 0b011101000;
let isPredicated = 1;
@@ -685,7 +713,7 @@ def A2_padditnew : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32, s32_0Imm:$Ii),
"if ($Pu4.new) $Rd32 = add($Rs32,#$Ii)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_10568534, PredNewRel, ImmRegRel {
+tc_28d296df, TypeALU32_2op>, Enc_e38e1f, PredNewRel, ImmRegRel {
let Inst{13-13} = 0b1;
let Inst{31-23} = 0b011101000;
let isPredicated = 1;
@@ -705,7 +733,7 @@ def A2_paddt : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32, IntRegs:$Rt32),
"if ($Pu4) $Rd32 = add($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_9626139, PredNewRel, ImmRegRel {
+tc_1b6011fb, TypeALU32_3op>, Enc_ea4c54, PredNewRel, ImmRegRel {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11111011000;
@@ -720,7 +748,7 @@ def A2_paddtnew : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32, IntRegs:$Rt32),
"if ($Pu4.new) $Rd32 = add($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_9626139, PredNewRel, ImmRegRel {
+tc_28d296df, TypeALU32_3op>, Enc_ea4c54, PredNewRel, ImmRegRel {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b11111011000;
@@ -736,7 +764,7 @@ def A2_pandf : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32, IntRegs:$Rt32),
"if (!$Pu4) $Rd32 = and($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_9626139, PredNewRel {
+tc_1b6011fb, TypeALU32_3op>, Enc_ea4c54, PredNewRel {
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11111001000;
@@ -750,7 +778,7 @@ def A2_pandfnew : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32, IntRegs:$Rt32),
"if (!$Pu4.new) $Rd32 = and($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_9626139, PredNewRel {
+tc_28d296df, TypeALU32_3op>, Enc_ea4c54, PredNewRel {
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b11111001000;
@@ -765,7 +793,7 @@ def A2_pandt : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32, IntRegs:$Rt32),
"if ($Pu4) $Rd32 = and($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_9626139, PredNewRel {
+tc_1b6011fb, TypeALU32_3op>, Enc_ea4c54, PredNewRel {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11111001000;
@@ -778,7 +806,7 @@ def A2_pandtnew : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32, IntRegs:$Rt32),
"if ($Pu4.new) $Rd32 = and($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_9626139, PredNewRel {
+tc_28d296df, TypeALU32_3op>, Enc_ea4c54, PredNewRel {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b11111001000;
@@ -792,7 +820,7 @@ def A2_porf : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32, IntRegs:$Rt32),
"if (!$Pu4) $Rd32 = or($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_9626139, PredNewRel {
+tc_1b6011fb, TypeALU32_3op>, Enc_ea4c54, PredNewRel {
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11111001001;
@@ -806,7 +834,7 @@ def A2_porfnew : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32, IntRegs:$Rt32),
"if (!$Pu4.new) $Rd32 = or($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_9626139, PredNewRel {
+tc_28d296df, TypeALU32_3op>, Enc_ea4c54, PredNewRel {
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b11111001001;
@@ -821,7 +849,7 @@ def A2_port : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32, IntRegs:$Rt32),
"if ($Pu4) $Rd32 = or($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_9626139, PredNewRel {
+tc_1b6011fb, TypeALU32_3op>, Enc_ea4c54, PredNewRel {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11111001001;
@@ -834,7 +862,7 @@ def A2_portnew : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32, IntRegs:$Rt32),
"if ($Pu4.new) $Rd32 = or($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_9626139, PredNewRel {
+tc_28d296df, TypeALU32_3op>, Enc_ea4c54, PredNewRel {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b11111001001;
@@ -848,7 +876,7 @@ def A2_psubf : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rt32, IntRegs:$Rs32),
"if (!$Pu4) $Rd32 = sub($Rt32,$Rs32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_1332717, PredNewRel {
+tc_1b6011fb, TypeALU32_3op>, Enc_9b0bc1, PredNewRel {
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11111011001;
@@ -862,7 +890,7 @@ def A2_psubfnew : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rt32, IntRegs:$Rs32),
"if (!$Pu4.new) $Rd32 = sub($Rt32,$Rs32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_1332717, PredNewRel {
+tc_28d296df, TypeALU32_3op>, Enc_9b0bc1, PredNewRel {
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b11111011001;
@@ -877,7 +905,7 @@ def A2_psubt : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rt32, IntRegs:$Rs32),
"if ($Pu4) $Rd32 = sub($Rt32,$Rs32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_1332717, PredNewRel {
+tc_1b6011fb, TypeALU32_3op>, Enc_9b0bc1, PredNewRel {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11111011001;
@@ -890,7 +918,7 @@ def A2_psubtnew : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rt32, IntRegs:$Rs32),
"if ($Pu4.new) $Rd32 = sub($Rt32,$Rs32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_1332717, PredNewRel {
+tc_28d296df, TypeALU32_3op>, Enc_9b0bc1, PredNewRel {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b11111011001;
@@ -904,7 +932,7 @@ def A2_pxorf : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32, IntRegs:$Rt32),
"if (!$Pu4) $Rd32 = xor($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_9626139, PredNewRel {
+tc_1b6011fb, TypeALU32_3op>, Enc_ea4c54, PredNewRel {
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11111001011;
@@ -918,7 +946,7 @@ def A2_pxorfnew : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32, IntRegs:$Rt32),
"if (!$Pu4.new) $Rd32 = xor($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_9626139, PredNewRel {
+tc_28d296df, TypeALU32_3op>, Enc_ea4c54, PredNewRel {
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b11111001011;
@@ -933,7 +961,7 @@ def A2_pxort : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32, IntRegs:$Rt32),
"if ($Pu4) $Rd32 = xor($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_9626139, PredNewRel {
+tc_1b6011fb, TypeALU32_3op>, Enc_ea4c54, PredNewRel {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11111001011;
@@ -946,7 +974,7 @@ def A2_pxortnew : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32, IntRegs:$Rt32),
"if ($Pu4.new) $Rd32 = xor($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_9626139, PredNewRel {
+tc_28d296df, TypeALU32_3op>, Enc_ea4c54, PredNewRel {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b11111001011;
@@ -960,18 +988,19 @@ def A2_roundsat : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32),
"$Rd32 = round($Rss32):sat",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_3742184, Requires<[HasV5T]> {
+tc_94e6ffd9, TypeS_2op>, Enc_90cd8b, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000001;
let Inst{31-21} = 0b10001000110;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def A2_sat : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32),
"$Rd32 = sat($Rss32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_3742184 {
+tc_b86c7e8b, TypeS_2op>, Enc_90cd8b {
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b10001000110;
let hasNewValue = 1;
@@ -982,7 +1011,7 @@ def A2_satb : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = satb($Rs32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_4075554 {
+tc_b86c7e8b, TypeS_2op>, Enc_5e2823 {
let Inst{13-5} = 0b000000111;
let Inst{31-21} = 0b10001100110;
let hasNewValue = 1;
@@ -993,7 +1022,7 @@ def A2_sath : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = sath($Rs32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_4075554 {
+tc_b86c7e8b, TypeS_2op>, Enc_5e2823 {
let Inst{13-5} = 0b000000100;
let Inst{31-21} = 0b10001100110;
let hasNewValue = 1;
@@ -1004,7 +1033,7 @@ def A2_satub : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = satub($Rs32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_4075554 {
+tc_b86c7e8b, TypeS_2op>, Enc_5e2823 {
let Inst{13-5} = 0b000000110;
let Inst{31-21} = 0b10001100110;
let hasNewValue = 1;
@@ -1015,7 +1044,7 @@ def A2_satuh : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = satuh($Rs32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_4075554 {
+tc_b86c7e8b, TypeS_2op>, Enc_5e2823 {
let Inst{13-5} = 0b000000101;
let Inst{31-21} = 0b10001100110;
let hasNewValue = 1;
@@ -1026,7 +1055,7 @@ def A2_sub : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = sub($Rt32,$Rs32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_8605375, PredNewRel, ImmRegRel {
+tc_548f402d, TypeALU32_3op>, Enc_bd6011, PredNewRel, ImmRegRel {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110011001;
@@ -1041,145 +1070,157 @@ def A2_subh_h16_hh : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = sub($Rt32.h,$Rs32.h):<<16",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_8605375 {
+tc_bd16579e, TypeALU64>, Enc_bd6011 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101011;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def A2_subh_h16_hl : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = sub($Rt32.h,$Rs32.l):<<16",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_8605375 {
+tc_bd16579e, TypeALU64>, Enc_bd6011 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101011;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def A2_subh_h16_lh : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = sub($Rt32.l,$Rs32.h):<<16",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_8605375 {
+tc_bd16579e, TypeALU64>, Enc_bd6011 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101011;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def A2_subh_h16_ll : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = sub($Rt32.l,$Rs32.l):<<16",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_8605375 {
+tc_bd16579e, TypeALU64>, Enc_bd6011 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101011;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def A2_subh_h16_sat_hh : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = sub($Rt32.h,$Rs32.h):sat:<<16",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8605375 {
+tc_47ab9233, TypeALU64>, Enc_bd6011 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101011;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def A2_subh_h16_sat_hl : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = sub($Rt32.h,$Rs32.l):sat:<<16",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8605375 {
+tc_47ab9233, TypeALU64>, Enc_bd6011 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101011;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def A2_subh_h16_sat_lh : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = sub($Rt32.l,$Rs32.h):sat:<<16",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8605375 {
+tc_47ab9233, TypeALU64>, Enc_bd6011 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101011;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def A2_subh_h16_sat_ll : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = sub($Rt32.l,$Rs32.l):sat:<<16",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8605375 {
+tc_47ab9233, TypeALU64>, Enc_bd6011 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101011;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def A2_subh_l16_hl : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = sub($Rt32.l,$Rs32.h)",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_8605375 {
+tc_7ca2ea10, TypeALU64>, Enc_bd6011 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101001;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def A2_subh_l16_ll : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = sub($Rt32.l,$Rs32.l)",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_8605375 {
+tc_7ca2ea10, TypeALU64>, Enc_bd6011 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101001;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def A2_subh_l16_sat_hl : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = sub($Rt32.l,$Rs32.h):sat",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8605375 {
+tc_47ab9233, TypeALU64>, Enc_bd6011 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101001;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def A2_subh_l16_sat_ll : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = sub($Rt32.l,$Rs32.l):sat",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8605375 {
+tc_47ab9233, TypeALU64>, Enc_bd6011 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101001;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def A2_subp : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = sub($Rtt32,$Rss32)",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_11687333 {
+tc_9c18c9a5, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011001;
@@ -1188,7 +1229,7 @@ def A2_subri : HInst<
(outs IntRegs:$Rd32),
(ins s32_0Imm:$Ii, IntRegs:$Rs32),
"$Rd32 = sub(#$Ii,$Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_13472494, PredNewRel, ImmRegRel {
+tc_548f402d, TypeALU32_2op>, Enc_140c83, PredNewRel, ImmRegRel {
let Inst{31-22} = 0b0111011001;
let hasNewValue = 1;
let opNewValue = 0;
@@ -1204,12 +1245,13 @@ def A2_subsat : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = sub($Rt32,$Rs32):sat",
-ALU32_3op_tc_2_SLOT0123, TypeALU32_3op>, Enc_8605375 {
+tc_b0f50e3c, TypeALU32_3op>, Enc_bd6011 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110110110;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
let InputType = "reg";
}
@@ -1217,7 +1259,7 @@ def A2_svaddh : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = vaddh($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_14071773 {
+tc_548f402d, TypeALU32_3op>, Enc_5ab2be {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110110000;
@@ -1230,12 +1272,13 @@ def A2_svaddhs : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = vaddh($Rs32,$Rt32):sat",
-ALU32_3op_tc_2_SLOT0123, TypeALU32_3op>, Enc_14071773 {
+tc_b0f50e3c, TypeALU32_3op>, Enc_5ab2be {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110110001;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
let InputType = "reg";
let isCommutable = 1;
@@ -1244,12 +1287,13 @@ def A2_svadduhs : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = vadduh($Rs32,$Rt32):sat",
-ALU32_3op_tc_2_SLOT0123, TypeALU32_3op>, Enc_14071773 {
+tc_b0f50e3c, TypeALU32_3op>, Enc_5ab2be {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110110011;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
let InputType = "reg";
let isCommutable = 1;
@@ -1258,12 +1302,13 @@ def A2_svavgh : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = vavgh($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_14071773 {
+tc_511f28f6, TypeALU32_3op>, Enc_5ab2be {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110111000;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let InputType = "reg";
let isCommutable = 1;
}
@@ -1271,12 +1316,13 @@ def A2_svavghs : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = vavgh($Rs32,$Rt32):rnd",
-ALU32_3op_tc_2_SLOT0123, TypeALU32_3op>, Enc_14071773 {
+tc_76c4c5ef, TypeALU32_3op>, Enc_5ab2be {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110111001;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let InputType = "reg";
let isCommutable = 1;
}
@@ -1284,19 +1330,20 @@ def A2_svnavgh : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = vnavgh($Rt32,$Rs32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_8605375 {
+tc_511f28f6, TypeALU32_3op>, Enc_bd6011 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110111011;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let InputType = "reg";
}
def A2_svsubh : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = vsubh($Rt32,$Rs32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_8605375 {
+tc_548f402d, TypeALU32_3op>, Enc_bd6011 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110110100;
@@ -1308,12 +1355,13 @@ def A2_svsubhs : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = vsubh($Rt32,$Rs32):sat",
-ALU32_3op_tc_2_SLOT0123, TypeALU32_3op>, Enc_8605375 {
+tc_b0f50e3c, TypeALU32_3op>, Enc_bd6011 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110110101;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
let InputType = "reg";
}
@@ -1321,12 +1369,13 @@ def A2_svsubuhs : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = vsubuh($Rt32,$Rs32):sat",
-ALU32_3op_tc_2_SLOT0123, TypeALU32_3op>, Enc_8605375 {
+tc_b0f50e3c, TypeALU32_3op>, Enc_bd6011 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110110111;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
let InputType = "reg";
}
@@ -1334,7 +1383,7 @@ def A2_swiz : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = swiz($Rs32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_4075554 {
+tc_b86c7e8b, TypeS_2op>, Enc_5e2823 {
let Inst{13-5} = 0b000000111;
let Inst{31-21} = 0b10001100100;
let hasNewValue = 1;
@@ -1344,7 +1393,7 @@ def A2_sxtb : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = sxtb($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_4075554, PredNewRel {
+tc_f16d5b17, TypeALU32_2op>, Enc_5e2823, PredNewRel {
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b01110000101;
let hasNewValue = 1;
@@ -1356,7 +1405,7 @@ def A2_sxth : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = sxth($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_4075554, PredNewRel {
+tc_f16d5b17, TypeALU32_2op>, Enc_5e2823, PredNewRel {
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b01110000111;
let hasNewValue = 1;
@@ -1368,7 +1417,7 @@ def A2_sxtw : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32),
"$Rdd32 = sxtw($Rs32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_4030179 {
+tc_b86c7e8b, TypeS_2op>, Enc_3a3d62 {
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b10000100010;
}
@@ -1376,7 +1425,7 @@ def A2_tfr : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = $Rs32",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_4075554, PredNewRel {
+tc_f16d5b17, TypeALU32_2op>, Enc_5e2823, PredNewRel {
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b01110000011;
let hasNewValue = 1;
@@ -1389,7 +1438,7 @@ def A2_tfrcrr : HInst<
(outs IntRegs:$Rd32),
(ins CtrRegs:$Cs32),
"$Rd32 = $Cs32",
-CR_tc_3x_SLOT3, TypeCR>, Enc_1539665 {
+tc_3b4892c6, TypeCR>, Enc_0cb018 {
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b01101010000;
let hasNewValue = 1;
@@ -1399,7 +1448,7 @@ def A2_tfrf : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if (!$Pu4) $Rd32 = $Rs32",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, PredNewRel, ImmRegRel {
+tc_1b6011fb, TypeALU32_2op>, PredNewRel, ImmRegRel {
let isPredicated = 1;
let isPredicatedFalse = 1;
let hasNewValue = 1;
@@ -1414,7 +1463,7 @@ def A2_tfrfnew : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if (!$Pu4.new) $Rd32 = $Rs32",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, PredNewRel, ImmRegRel {
+tc_28d296df, TypeALU32_2op>, PredNewRel, ImmRegRel {
let isPredicated = 1;
let isPredicatedFalse = 1;
let hasNewValue = 1;
@@ -1430,7 +1479,7 @@ def A2_tfrih : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, u16_0Imm:$Ii),
"$Rx32.h = #$Ii",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_6130414 {
+tc_548f402d, TypeALU32_2op>, Enc_51436c {
let Inst{21-21} = 0b1;
let Inst{31-24} = 0b01110010;
let hasNewValue = 1;
@@ -1441,7 +1490,7 @@ def A2_tfril : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, u16_0Imm:$Ii),
"$Rx32.l = #$Ii",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_6130414 {
+tc_548f402d, TypeALU32_2op>, Enc_51436c {
let Inst{21-21} = 0b1;
let Inst{31-24} = 0b01110001;
let hasNewValue = 1;
@@ -1452,7 +1501,7 @@ def A2_tfrp : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32),
"$Rdd32 = $Rss32",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, PredNewRel {
+tc_548f402d, TypeALU32_2op>, PredNewRel {
let BaseOpcode = "A2_tfrp";
let isPredicable = 1;
let isPseudo = 1;
@@ -1461,7 +1510,7 @@ def A2_tfrpf : HInst<
(outs DoubleRegs:$Rdd32),
(ins PredRegs:$Pu4, DoubleRegs:$Rss32),
"if (!$Pu4) $Rdd32 = $Rss32",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, PredNewRel {
+tc_548f402d, TypeALU32_2op>, PredNewRel {
let isPredicated = 1;
let isPredicatedFalse = 1;
let BaseOpcode = "A2_tfrp";
@@ -1471,7 +1520,7 @@ def A2_tfrpfnew : HInst<
(outs DoubleRegs:$Rdd32),
(ins PredRegs:$Pu4, DoubleRegs:$Rss32),
"if (!$Pu4.new) $Rdd32 = $Rss32",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, PredNewRel {
+tc_b08be45e, TypeALU32_2op>, PredNewRel {
let isPredicated = 1;
let isPredicatedFalse = 1;
let isPredicatedNew = 1;
@@ -1482,7 +1531,7 @@ def A2_tfrpi : HInst<
(outs DoubleRegs:$Rdd32),
(ins s8_0Imm:$Ii),
"$Rdd32 = #$Ii",
-ALU64_tc_1_SLOT23, TypeALU64> {
+tc_548f402d, TypeALU64> {
let isReMaterializable = 1;
let isAsCheapAsAMove = 1;
let isMoveImm = 1;
@@ -1492,7 +1541,7 @@ def A2_tfrpt : HInst<
(outs DoubleRegs:$Rdd32),
(ins PredRegs:$Pu4, DoubleRegs:$Rss32),
"if ($Pu4) $Rdd32 = $Rss32",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, PredNewRel {
+tc_548f402d, TypeALU32_2op>, PredNewRel {
let isPredicated = 1;
let BaseOpcode = "A2_tfrp";
let isPseudo = 1;
@@ -1501,7 +1550,7 @@ def A2_tfrptnew : HInst<
(outs DoubleRegs:$Rdd32),
(ins PredRegs:$Pu4, DoubleRegs:$Rss32),
"if ($Pu4.new) $Rdd32 = $Rss32",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, PredNewRel {
+tc_b08be45e, TypeALU32_2op>, PredNewRel {
let isPredicated = 1;
let isPredicatedNew = 1;
let BaseOpcode = "A2_tfrp";
@@ -1511,7 +1560,7 @@ def A2_tfrrcr : HInst<
(outs CtrRegs:$Cd32),
(ins IntRegs:$Rs32),
"$Cd32 = $Rs32",
-CR_tc_3x_SLOT3, TypeCR>, Enc_9018141 {
+tc_82f0f122, TypeCR>, Enc_bd811a {
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b01100010001;
let hasNewValue = 1;
@@ -1521,7 +1570,7 @@ def A2_tfrsi : HInst<
(outs IntRegs:$Rd32),
(ins s32_0Imm:$Ii),
"$Rd32 = #$Ii",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_7971062, PredNewRel, ImmRegRel {
+tc_f16d5b17, TypeALU32_2op>, Enc_5e87ce, PredNewRel, ImmRegRel {
let Inst{21-21} = 0b0;
let Inst{31-24} = 0b01111000;
let hasNewValue = 1;
@@ -1543,7 +1592,7 @@ def A2_tfrt : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if ($Pu4) $Rd32 = $Rs32",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, PredNewRel, ImmRegRel {
+tc_1b6011fb, TypeALU32_2op>, PredNewRel, ImmRegRel {
let isPredicated = 1;
let hasNewValue = 1;
let opNewValue = 0;
@@ -1557,7 +1606,7 @@ def A2_tfrtnew : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if ($Pu4.new) $Rd32 = $Rs32",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, PredNewRel, ImmRegRel {
+tc_28d296df, TypeALU32_2op>, PredNewRel, ImmRegRel {
let isPredicated = 1;
let hasNewValue = 1;
let opNewValue = 0;
@@ -1572,41 +1621,45 @@ def A2_vabsh : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32),
"$Rdd32 = vabsh($Rss32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_13133231 {
+tc_94e6ffd9, TypeS_2op>, Enc_b9c5fb {
let Inst{13-5} = 0b000000100;
let Inst{31-21} = 0b10000000010;
+let prefersSlot3 = 1;
}
def A2_vabshsat : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32),
"$Rdd32 = vabsh($Rss32):sat",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_13133231 {
+tc_94e6ffd9, TypeS_2op>, Enc_b9c5fb {
let Inst{13-5} = 0b000000101;
let Inst{31-21} = 0b10000000010;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def A2_vabsw : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32),
"$Rdd32 = vabsw($Rss32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_13133231 {
+tc_94e6ffd9, TypeS_2op>, Enc_b9c5fb {
let Inst{13-5} = 0b000000110;
let Inst{31-21} = 0b10000000010;
+let prefersSlot3 = 1;
}
def A2_vabswsat : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32),
"$Rdd32 = vabsw($Rss32):sat",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_13133231 {
+tc_94e6ffd9, TypeS_2op>, Enc_b9c5fb {
let Inst{13-5} = 0b000000111;
let Inst{31-21} = 0b10000000010;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def A2_vaddb_map : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vaddb($Rss32,$Rtt32)",
-PSEUDO, TypeMAPPING> {
+tc_9c18c9a5, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -1614,7 +1667,7 @@ def A2_vaddh : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vaddh($Rss32,$Rtt32)",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_8333157 {
+tc_9c18c9a5, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011000;
@@ -1623,17 +1676,18 @@ def A2_vaddhs : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vaddh($Rss32,$Rtt32):sat",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8333157 {
+tc_47ab9233, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011000;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def A2_vaddub : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vaddub($Rss32,$Rtt32)",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8333157 {
+tc_9c18c9a5, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011000;
@@ -1642,27 +1696,29 @@ def A2_vaddubs : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vaddub($Rss32,$Rtt32):sat",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8333157 {
+tc_47ab9233, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011000;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def A2_vadduhs : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vadduh($Rss32,$Rtt32):sat",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8333157 {
+tc_47ab9233, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011000;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def A2_vaddw : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vaddw($Rss32,$Rtt32)",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_8333157 {
+tc_9c18c9a5, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011000;
@@ -1671,26 +1727,28 @@ def A2_vaddws : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vaddw($Rss32,$Rtt32):sat",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8333157 {
+tc_47ab9233, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011000;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def A2_vavgh : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vavgh($Rss32,$Rtt32)",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_8333157 {
+tc_cd321066, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011010;
+let prefersSlot3 = 1;
}
def A2_vavghcr : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vavgh($Rss32,$Rtt32):crnd",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8333157 {
+tc_63cd9d2d, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011010;
@@ -1700,79 +1758,87 @@ def A2_vavghr : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vavgh($Rss32,$Rtt32):rnd",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8333157 {
+tc_37326008, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011010;
+let prefersSlot3 = 1;
}
def A2_vavgub : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vavgub($Rss32,$Rtt32)",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_8333157 {
+tc_cd321066, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011010;
+let prefersSlot3 = 1;
}
def A2_vavgubr : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vavgub($Rss32,$Rtt32):rnd",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8333157 {
+tc_37326008, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011010;
+let prefersSlot3 = 1;
}
def A2_vavguh : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vavguh($Rss32,$Rtt32)",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_8333157 {
+tc_cd321066, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011010;
+let prefersSlot3 = 1;
}
def A2_vavguhr : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vavguh($Rss32,$Rtt32):rnd",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8333157 {
+tc_37326008, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011010;
+let prefersSlot3 = 1;
}
def A2_vavguw : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vavguw($Rss32,$Rtt32)",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_8333157 {
+tc_cd321066, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011011;
+let prefersSlot3 = 1;
}
def A2_vavguwr : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vavguw($Rss32,$Rtt32):rnd",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8333157 {
+tc_37326008, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011011;
+let prefersSlot3 = 1;
}
def A2_vavgw : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vavgw($Rss32,$Rtt32)",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_8333157 {
+tc_cd321066, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011011;
+let prefersSlot3 = 1;
}
def A2_vavgwcr : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vavgw($Rss32,$Rtt32):crnd",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8333157 {
+tc_63cd9d2d, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011011;
@@ -1782,16 +1848,17 @@ def A2_vavgwr : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vavgw($Rss32,$Rtt32):rnd",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8333157 {
+tc_37326008, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011011;
+let prefersSlot3 = 1;
}
def A2_vcmpbeq : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Pd4 = vcmpb.eq($Rss32,$Rtt32)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_3831744 {
+tc_c58f771a, TypeALU64>, Enc_fcf7a7 {
let Inst{7-2} = 0b110000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010010000;
@@ -1800,7 +1867,7 @@ def A2_vcmpbgtu : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Pd4 = vcmpb.gtu($Rss32,$Rtt32)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_3831744 {
+tc_c58f771a, TypeALU64>, Enc_fcf7a7 {
let Inst{7-2} = 0b111000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010010000;
@@ -1809,7 +1876,7 @@ def A2_vcmpheq : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Pd4 = vcmph.eq($Rss32,$Rtt32)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_3831744 {
+tc_c58f771a, TypeALU64>, Enc_fcf7a7 {
let Inst{7-2} = 0b011000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010010000;
@@ -1818,7 +1885,7 @@ def A2_vcmphgt : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Pd4 = vcmph.gt($Rss32,$Rtt32)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_3831744 {
+tc_c58f771a, TypeALU64>, Enc_fcf7a7 {
let Inst{7-2} = 0b100000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010010000;
@@ -1827,7 +1894,7 @@ def A2_vcmphgtu : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Pd4 = vcmph.gtu($Rss32,$Rtt32)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_3831744 {
+tc_c58f771a, TypeALU64>, Enc_fcf7a7 {
let Inst{7-2} = 0b101000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010010000;
@@ -1836,7 +1903,7 @@ def A2_vcmpweq : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Pd4 = vcmpw.eq($Rss32,$Rtt32)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_3831744 {
+tc_c58f771a, TypeALU64>, Enc_fcf7a7 {
let Inst{7-2} = 0b000000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010010000;
@@ -1845,7 +1912,7 @@ def A2_vcmpwgt : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Pd4 = vcmpw.gt($Rss32,$Rtt32)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_3831744 {
+tc_c58f771a, TypeALU64>, Enc_fcf7a7 {
let Inst{7-2} = 0b001000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010010000;
@@ -1854,7 +1921,7 @@ def A2_vcmpwgtu : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Pd4 = vcmpw.gtu($Rss32,$Rtt32)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_3831744 {
+tc_c58f771a, TypeALU64>, Enc_fcf7a7 {
let Inst{7-2} = 0b010000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010010000;
@@ -1863,133 +1930,147 @@ def A2_vconj : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32),
"$Rdd32 = vconj($Rss32):sat",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_13133231 {
+tc_94e6ffd9, TypeS_2op>, Enc_b9c5fb {
let Inst{13-5} = 0b000000111;
let Inst{31-21} = 0b10000000100;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def A2_vmaxb : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vmaxb($Rtt32,$Rss32)",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_11687333 {
+tc_47ab9233, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011110;
+let prefersSlot3 = 1;
}
def A2_vmaxh : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vmaxh($Rtt32,$Rss32)",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_11687333 {
+tc_47ab9233, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011110;
+let prefersSlot3 = 1;
}
def A2_vmaxub : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vmaxub($Rtt32,$Rss32)",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_11687333 {
+tc_47ab9233, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011110;
+let prefersSlot3 = 1;
}
def A2_vmaxuh : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vmaxuh($Rtt32,$Rss32)",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_11687333 {
+tc_47ab9233, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011110;
+let prefersSlot3 = 1;
}
def A2_vmaxuw : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vmaxuw($Rtt32,$Rss32)",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_11687333 {
+tc_47ab9233, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011101;
+let prefersSlot3 = 1;
}
def A2_vmaxw : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vmaxw($Rtt32,$Rss32)",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_11687333 {
+tc_47ab9233, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011110;
+let prefersSlot3 = 1;
}
def A2_vminb : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vminb($Rtt32,$Rss32)",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_11687333 {
+tc_47ab9233, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011110;
+let prefersSlot3 = 1;
}
def A2_vminh : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vminh($Rtt32,$Rss32)",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_11687333 {
+tc_47ab9233, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011101;
+let prefersSlot3 = 1;
}
def A2_vminub : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vminub($Rtt32,$Rss32)",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_11687333 {
+tc_47ab9233, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011101;
+let prefersSlot3 = 1;
}
def A2_vminuh : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vminuh($Rtt32,$Rss32)",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_11687333 {
+tc_47ab9233, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011101;
+let prefersSlot3 = 1;
}
def A2_vminuw : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vminuw($Rtt32,$Rss32)",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_11687333 {
+tc_47ab9233, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011101;
+let prefersSlot3 = 1;
}
def A2_vminw : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vminw($Rtt32,$Rss32)",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_11687333 {
+tc_47ab9233, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011101;
+let prefersSlot3 = 1;
}
def A2_vnavgh : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vnavgh($Rtt32,$Rss32)",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_11687333 {
+tc_cd321066, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011100;
+let prefersSlot3 = 1;
}
def A2_vnavghcr : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vnavgh($Rtt32,$Rss32):crnd:sat",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_11687333 {
+tc_63cd9d2d, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011100;
@@ -2000,7 +2081,7 @@ def A2_vnavghr : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vnavgh($Rtt32,$Rss32):rnd:sat",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_11687333 {
+tc_63cd9d2d, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011100;
@@ -2011,16 +2092,17 @@ def A2_vnavgw : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vnavgw($Rtt32,$Rss32)",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_11687333 {
+tc_cd321066, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011100;
+let prefersSlot3 = 1;
}
def A2_vnavgwcr : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vnavgw($Rtt32,$Rss32):crnd:sat",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_11687333 {
+tc_63cd9d2d, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011100;
@@ -2031,7 +2113,7 @@ def A2_vnavgwr : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vnavgw($Rtt32,$Rss32):rnd:sat",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_11687333 {
+tc_63cd9d2d, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011100;
@@ -2042,7 +2124,7 @@ def A2_vraddub : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vraddub($Rss32,$Rtt32)",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000010;
@@ -2052,7 +2134,7 @@ def A2_vraddub_acc : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vraddub($Rss32,$Rtt32)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010010;
@@ -2063,7 +2145,7 @@ def A2_vrsadub : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vrsadub($Rss32,$Rtt32)",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000010;
@@ -2073,7 +2155,7 @@ def A2_vrsadub_acc : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vrsadub($Rss32,$Rtt32)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010010;
@@ -2084,7 +2166,7 @@ def A2_vsubb_map : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vsubb($Rss32,$Rtt32)",
-PSEUDO, TypeMAPPING> {
+tc_9c18c9a5, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -2092,7 +2174,7 @@ def A2_vsubh : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vsubh($Rtt32,$Rss32)",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_11687333 {
+tc_9c18c9a5, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011001;
@@ -2101,17 +2183,18 @@ def A2_vsubhs : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vsubh($Rtt32,$Rss32):sat",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_11687333 {
+tc_47ab9233, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011001;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def A2_vsubub : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vsubub($Rtt32,$Rss32)",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_11687333 {
+tc_9c18c9a5, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011001;
@@ -2120,27 +2203,29 @@ def A2_vsububs : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vsubub($Rtt32,$Rss32):sat",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_11687333 {
+tc_47ab9233, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011001;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def A2_vsubuhs : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vsubuh($Rtt32,$Rss32):sat",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_11687333 {
+tc_47ab9233, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011001;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def A2_vsubw : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vsubw($Rtt32,$Rss32)",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_11687333 {
+tc_9c18c9a5, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011001;
@@ -2149,17 +2234,18 @@ def A2_vsubws : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vsubw($Rtt32,$Rss32):sat",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_11687333 {
+tc_47ab9233, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011001;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def A2_xor : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = xor($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_14071773, PredNewRel {
+tc_548f402d, TypeALU32_3op>, Enc_5ab2be, PredNewRel {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110001011;
@@ -2174,7 +2260,7 @@ def A2_xorp : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = xor($Rss32,$Rtt32)",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_8333157 {
+tc_9c18c9a5, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011111;
@@ -2184,7 +2270,7 @@ def A2_zxtb : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = zxtb($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, PredNewRel {
+tc_548f402d, TypeALU32_2op>, PredNewRel {
let hasNewValue = 1;
let opNewValue = 0;
let BaseOpcode = "A2_zxtb";
@@ -2196,7 +2282,7 @@ def A2_zxth : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = zxth($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_4075554, PredNewRel {
+tc_f16d5b17, TypeALU32_2op>, Enc_5e2823, PredNewRel {
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b01110000110;
let hasNewValue = 1;
@@ -2208,7 +2294,7 @@ def A4_addp_c : HInst<
(outs DoubleRegs:$Rdd32, PredRegs:$Px4),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32, PredRegs:$Px4in),
"$Rdd32 = add($Rss32,$Rtt32,$Px4):carry",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_151014 {
+tc_a87879e8, TypeS_3op>, Enc_2b3f60 {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000010110;
@@ -2219,7 +2305,7 @@ def A4_andn : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = and($Rt32,~$Rs32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_8605375 {
+tc_548f402d, TypeALU32_3op>, Enc_bd6011 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110001100;
@@ -2231,7 +2317,7 @@ def A4_andnp : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = and($Rtt32,~$Rss32)",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_11687333 {
+tc_9c18c9a5, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011111;
@@ -2240,32 +2326,34 @@ def A4_bitsplit : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = bitsplit($Rs32,$Rt32)",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_1997594 {
+tc_7ca2ea10, TypeALU64>, Enc_be32a5 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010100001;
+let prefersSlot3 = 1;
}
def A4_bitspliti : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rdd32 = bitsplit($Rs32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_5654851 {
+tc_7ca2ea10, TypeS_2op>, Enc_311abd {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001000110;
+let prefersSlot3 = 1;
}
def A4_boundscheck : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, DoubleRegs:$Rtt32),
"$Pd4 = boundscheck($Rs32,$Rtt32)",
-M_tc_3x_SLOT23, TypeALU64> {
+tc_c58f771a, TypeALU64> {
let isPseudo = 1;
}
def A4_boundscheck_hi : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Pd4 = boundscheck($Rss32,$Rtt32):raw:hi",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_3831744 {
+tc_c58f771a, TypeALU64>, Enc_fcf7a7 {
let Inst{7-2} = 0b101000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b11010010000;
@@ -2274,7 +2362,7 @@ def A4_boundscheck_lo : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Pd4 = boundscheck($Rss32,$Rtt32):raw:lo",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_3831744 {
+tc_c58f771a, TypeALU64>, Enc_fcf7a7 {
let Inst{7-2} = 0b100000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b11010010000;
@@ -2283,7 +2371,7 @@ def A4_cmpbeq : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Pd4 = cmpb.eq($Rs32,$Rt32)",
-S_3op_tc_2early_SLOT23, TypeS_3op>, Enc_10157519, ImmRegRel {
+tc_c58f771a, TypeS_3op>, Enc_c2b48e, ImmRegRel {
let Inst{7-2} = 0b110000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000111110;
@@ -2296,7 +2384,7 @@ def A4_cmpbeqi : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, u8_0Imm:$Ii),
"$Pd4 = cmpb.eq($Rs32,#$Ii)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_6736678, ImmRegRel {
+tc_5fa2857c, TypeALU64>, Enc_08d755, ImmRegRel {
let Inst{4-2} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11011101000;
@@ -2309,7 +2397,7 @@ def A4_cmpbgt : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Pd4 = cmpb.gt($Rs32,$Rt32)",
-S_3op_tc_2early_SLOT23, TypeS_3op>, Enc_10157519, ImmRegRel {
+tc_c58f771a, TypeS_3op>, Enc_c2b48e, ImmRegRel {
let Inst{7-2} = 0b010000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000111110;
@@ -2321,7 +2409,7 @@ def A4_cmpbgti : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, s8_0Imm:$Ii),
"$Pd4 = cmpb.gt($Rs32,#$Ii)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_6736678, ImmRegRel {
+tc_5fa2857c, TypeALU64>, Enc_08d755, ImmRegRel {
let Inst{4-2} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11011101001;
@@ -2333,7 +2421,7 @@ def A4_cmpbgtu : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Pd4 = cmpb.gtu($Rs32,$Rt32)",
-S_3op_tc_2early_SLOT23, TypeS_3op>, Enc_10157519, ImmRegRel {
+tc_c58f771a, TypeS_3op>, Enc_c2b48e, ImmRegRel {
let Inst{7-2} = 0b111000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000111110;
@@ -2345,7 +2433,7 @@ def A4_cmpbgtui : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, u32_0Imm:$Ii),
"$Pd4 = cmpb.gtu($Rs32,#$Ii)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_3531000, ImmRegRel {
+tc_5fa2857c, TypeALU64>, Enc_02553a, ImmRegRel {
let Inst{4-2} = 0b000;
let Inst{13-12} = 0b00;
let Inst{31-21} = 0b11011101010;
@@ -2362,7 +2450,7 @@ def A4_cmpheq : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Pd4 = cmph.eq($Rs32,$Rt32)",
-S_3op_tc_2early_SLOT23, TypeS_3op>, Enc_10157519, ImmRegRel {
+tc_c58f771a, TypeS_3op>, Enc_c2b48e, ImmRegRel {
let Inst{7-2} = 0b011000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000111110;
@@ -2375,7 +2463,7 @@ def A4_cmpheqi : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, s32_0Imm:$Ii),
"$Pd4 = cmph.eq($Rs32,#$Ii)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_6736678, ImmRegRel {
+tc_5fa2857c, TypeALU64>, Enc_08d755, ImmRegRel {
let Inst{4-2} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11011101000;
@@ -2393,7 +2481,7 @@ def A4_cmphgt : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Pd4 = cmph.gt($Rs32,$Rt32)",
-S_3op_tc_2early_SLOT23, TypeS_3op>, Enc_10157519, ImmRegRel {
+tc_c58f771a, TypeS_3op>, Enc_c2b48e, ImmRegRel {
let Inst{7-2} = 0b100000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000111110;
@@ -2405,7 +2493,7 @@ def A4_cmphgti : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, s32_0Imm:$Ii),
"$Pd4 = cmph.gt($Rs32,#$Ii)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_6736678, ImmRegRel {
+tc_5fa2857c, TypeALU64>, Enc_08d755, ImmRegRel {
let Inst{4-2} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11011101001;
@@ -2422,7 +2510,7 @@ def A4_cmphgtu : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Pd4 = cmph.gtu($Rs32,$Rt32)",
-S_3op_tc_2early_SLOT23, TypeS_3op>, Enc_10157519, ImmRegRel {
+tc_c58f771a, TypeS_3op>, Enc_c2b48e, ImmRegRel {
let Inst{7-2} = 0b101000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000111110;
@@ -2434,7 +2522,7 @@ def A4_cmphgtui : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, u32_0Imm:$Ii),
"$Pd4 = cmph.gtu($Rs32,#$Ii)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_3531000, ImmRegRel {
+tc_5fa2857c, TypeALU64>, Enc_02553a, ImmRegRel {
let Inst{4-2} = 0b010;
let Inst{13-12} = 0b00;
let Inst{31-21} = 0b11011101010;
@@ -2451,7 +2539,7 @@ def A4_combineii : HInst<
(outs DoubleRegs:$Rdd32),
(ins s8_0Imm:$Ii, u32_0Imm:$II),
"$Rdd32 = combine(#$Ii,#$II)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9864697 {
+tc_548f402d, TypeALU32_2op>, Enc_f0cca7 {
let Inst{31-21} = 0b01111100100;
let isExtendable = 1;
let opExtendable = 2;
@@ -2463,7 +2551,7 @@ def A4_combineir : HInst<
(outs DoubleRegs:$Rdd32),
(ins s32_0Imm:$Ii, IntRegs:$Rs32),
"$Rdd32 = combine(#$Ii,$Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_2462143 {
+tc_548f402d, TypeALU32_2op>, Enc_9cdba7 {
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b01110011001;
let isExtendable = 1;
@@ -2476,7 +2564,7 @@ def A4_combineri : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, s32_0Imm:$Ii),
"$Rdd32 = combine($Rs32,#$Ii)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_2462143 {
+tc_548f402d, TypeALU32_2op>, Enc_9cdba7 {
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b01110011000;
let isExtendable = 1;
@@ -2489,7 +2577,7 @@ def A4_cround_ri : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rd32 = cround($Rs32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_2771456 {
+tc_63cd9d2d, TypeS_2op>, Enc_a05677 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001100111;
@@ -2501,7 +2589,7 @@ def A4_cround_rr : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = cround($Rs32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_14071773 {
+tc_63cd9d2d, TypeS_3op>, Enc_5ab2be {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000110110;
@@ -2513,14 +2601,14 @@ def A4_ext : HInst<
(outs),
(ins u26_6Imm:$Ii),
"immext(#$Ii)",
-EXTENDER_tc_1_SLOT0123, TypeEXTENDER>, Enc_2082956 {
+tc_9a13af9d, TypeEXTENDER>, Enc_2b518f {
let Inst{31-28} = 0b0000;
}
def A4_modwrapu : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = modwrap($Rs32,$Rt32)",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_14071773 {
+tc_47ab9233, TypeALU64>, Enc_5ab2be {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011111;
@@ -2532,7 +2620,7 @@ def A4_orn : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = or($Rt32,~$Rs32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_8605375 {
+tc_548f402d, TypeALU32_3op>, Enc_bd6011 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110001101;
@@ -2544,7 +2632,7 @@ def A4_ornp : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = or($Rtt32,~$Rss32)",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_11687333 {
+tc_9c18c9a5, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011111;
@@ -2553,7 +2641,7 @@ def A4_paslhf : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if (!$Pu4) $Rd32 = aslh($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9422954, PredNewRel {
+tc_548f402d, TypeALU32_2op>, Enc_fb6577, PredNewRel {
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b1010;
let Inst{31-21} = 0b01110000000;
@@ -2567,7 +2655,7 @@ def A4_paslhfnew : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if (!$Pu4.new) $Rd32 = aslh($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9422954, PredNewRel {
+tc_b08be45e, TypeALU32_2op>, Enc_fb6577, PredNewRel {
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b1011;
let Inst{31-21} = 0b01110000000;
@@ -2582,7 +2670,7 @@ def A4_paslht : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if ($Pu4) $Rd32 = aslh($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9422954, PredNewRel {
+tc_548f402d, TypeALU32_2op>, Enc_fb6577, PredNewRel {
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b1000;
let Inst{31-21} = 0b01110000000;
@@ -2595,7 +2683,7 @@ def A4_paslhtnew : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if ($Pu4.new) $Rd32 = aslh($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9422954, PredNewRel {
+tc_b08be45e, TypeALU32_2op>, Enc_fb6577, PredNewRel {
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b1001;
let Inst{31-21} = 0b01110000000;
@@ -2609,7 +2697,7 @@ def A4_pasrhf : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if (!$Pu4) $Rd32 = asrh($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9422954, PredNewRel {
+tc_548f402d, TypeALU32_2op>, Enc_fb6577, PredNewRel {
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b1010;
let Inst{31-21} = 0b01110000001;
@@ -2623,7 +2711,7 @@ def A4_pasrhfnew : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if (!$Pu4.new) $Rd32 = asrh($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9422954, PredNewRel {
+tc_b08be45e, TypeALU32_2op>, Enc_fb6577, PredNewRel {
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b1011;
let Inst{31-21} = 0b01110000001;
@@ -2638,7 +2726,7 @@ def A4_pasrht : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if ($Pu4) $Rd32 = asrh($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9422954, PredNewRel {
+tc_548f402d, TypeALU32_2op>, Enc_fb6577, PredNewRel {
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b1000;
let Inst{31-21} = 0b01110000001;
@@ -2651,7 +2739,7 @@ def A4_pasrhtnew : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if ($Pu4.new) $Rd32 = asrh($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9422954, PredNewRel {
+tc_b08be45e, TypeALU32_2op>, Enc_fb6577, PredNewRel {
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b1001;
let Inst{31-21} = 0b01110000001;
@@ -2665,7 +2753,7 @@ def A4_psxtbf : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if (!$Pu4) $Rd32 = sxtb($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9422954, PredNewRel {
+tc_548f402d, TypeALU32_2op>, Enc_fb6577, PredNewRel {
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b1010;
let Inst{31-21} = 0b01110000101;
@@ -2679,7 +2767,7 @@ def A4_psxtbfnew : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if (!$Pu4.new) $Rd32 = sxtb($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9422954, PredNewRel {
+tc_b08be45e, TypeALU32_2op>, Enc_fb6577, PredNewRel {
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b1011;
let Inst{31-21} = 0b01110000101;
@@ -2694,7 +2782,7 @@ def A4_psxtbt : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if ($Pu4) $Rd32 = sxtb($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9422954, PredNewRel {
+tc_548f402d, TypeALU32_2op>, Enc_fb6577, PredNewRel {
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b1000;
let Inst{31-21} = 0b01110000101;
@@ -2707,7 +2795,7 @@ def A4_psxtbtnew : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if ($Pu4.new) $Rd32 = sxtb($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9422954, PredNewRel {
+tc_b08be45e, TypeALU32_2op>, Enc_fb6577, PredNewRel {
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b1001;
let Inst{31-21} = 0b01110000101;
@@ -2721,7 +2809,7 @@ def A4_psxthf : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if (!$Pu4) $Rd32 = sxth($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9422954, PredNewRel {
+tc_548f402d, TypeALU32_2op>, Enc_fb6577, PredNewRel {
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b1010;
let Inst{31-21} = 0b01110000111;
@@ -2735,7 +2823,7 @@ def A4_psxthfnew : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if (!$Pu4.new) $Rd32 = sxth($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9422954, PredNewRel {
+tc_b08be45e, TypeALU32_2op>, Enc_fb6577, PredNewRel {
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b1011;
let Inst{31-21} = 0b01110000111;
@@ -2750,7 +2838,7 @@ def A4_psxtht : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if ($Pu4) $Rd32 = sxth($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9422954, PredNewRel {
+tc_548f402d, TypeALU32_2op>, Enc_fb6577, PredNewRel {
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b1000;
let Inst{31-21} = 0b01110000111;
@@ -2763,7 +2851,7 @@ def A4_psxthtnew : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if ($Pu4.new) $Rd32 = sxth($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9422954, PredNewRel {
+tc_b08be45e, TypeALU32_2op>, Enc_fb6577, PredNewRel {
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b1001;
let Inst{31-21} = 0b01110000111;
@@ -2777,7 +2865,7 @@ def A4_pzxtbf : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if (!$Pu4) $Rd32 = zxtb($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9422954, PredNewRel {
+tc_548f402d, TypeALU32_2op>, Enc_fb6577, PredNewRel {
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b1010;
let Inst{31-21} = 0b01110000100;
@@ -2791,7 +2879,7 @@ def A4_pzxtbfnew : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if (!$Pu4.new) $Rd32 = zxtb($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9422954, PredNewRel {
+tc_b08be45e, TypeALU32_2op>, Enc_fb6577, PredNewRel {
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b1011;
let Inst{31-21} = 0b01110000100;
@@ -2806,7 +2894,7 @@ def A4_pzxtbt : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if ($Pu4) $Rd32 = zxtb($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9422954, PredNewRel {
+tc_548f402d, TypeALU32_2op>, Enc_fb6577, PredNewRel {
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b1000;
let Inst{31-21} = 0b01110000100;
@@ -2819,7 +2907,7 @@ def A4_pzxtbtnew : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if ($Pu4.new) $Rd32 = zxtb($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9422954, PredNewRel {
+tc_b08be45e, TypeALU32_2op>, Enc_fb6577, PredNewRel {
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b1001;
let Inst{31-21} = 0b01110000100;
@@ -2833,7 +2921,7 @@ def A4_pzxthf : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if (!$Pu4) $Rd32 = zxth($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9422954, PredNewRel {
+tc_548f402d, TypeALU32_2op>, Enc_fb6577, PredNewRel {
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b1010;
let Inst{31-21} = 0b01110000110;
@@ -2847,7 +2935,7 @@ def A4_pzxthfnew : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if (!$Pu4.new) $Rd32 = zxth($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9422954, PredNewRel {
+tc_b08be45e, TypeALU32_2op>, Enc_fb6577, PredNewRel {
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b1011;
let Inst{31-21} = 0b01110000110;
@@ -2862,7 +2950,7 @@ def A4_pzxtht : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if ($Pu4) $Rd32 = zxth($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9422954, PredNewRel {
+tc_548f402d, TypeALU32_2op>, Enc_fb6577, PredNewRel {
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b1000;
let Inst{31-21} = 0b01110000110;
@@ -2875,7 +2963,7 @@ def A4_pzxthtnew : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if ($Pu4.new) $Rd32 = zxth($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9422954, PredNewRel {
+tc_b08be45e, TypeALU32_2op>, Enc_fb6577, PredNewRel {
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b1001;
let Inst{31-21} = 0b01110000110;
@@ -2889,7 +2977,7 @@ def A4_rcmpeq : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = cmp.eq($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_14071773, ImmRegRel {
+tc_548f402d, TypeALU32_3op>, Enc_5ab2be, ImmRegRel {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110011010;
@@ -2903,7 +2991,7 @@ def A4_rcmpeqi : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, s32_0Imm:$Ii),
"$Rd32 = cmp.eq($Rs32,#$Ii)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_16355964, ImmRegRel {
+tc_548f402d, TypeALU32_2op>, Enc_b8c967, ImmRegRel {
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b01110011010;
let hasNewValue = 1;
@@ -2920,7 +3008,7 @@ def A4_rcmpneq : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = !cmp.eq($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_14071773, ImmRegRel {
+tc_548f402d, TypeALU32_3op>, Enc_5ab2be, ImmRegRel {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110011011;
@@ -2934,7 +3022,7 @@ def A4_rcmpneqi : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, s32_0Imm:$Ii),
"$Rd32 = !cmp.eq($Rs32,#$Ii)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_16355964, ImmRegRel {
+tc_548f402d, TypeALU32_2op>, Enc_b8c967, ImmRegRel {
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b01110011011;
let hasNewValue = 1;
@@ -2951,7 +3039,7 @@ def A4_round_ri : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rd32 = round($Rs32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_2771456 {
+tc_63cd9d2d, TypeS_2op>, Enc_a05677 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001100111;
@@ -2963,7 +3051,7 @@ def A4_round_ri_sat : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rd32 = round($Rs32,#$Ii):sat",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_2771456 {
+tc_63cd9d2d, TypeS_2op>, Enc_a05677 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001100111;
@@ -2976,7 +3064,7 @@ def A4_round_rr : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = round($Rs32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_14071773 {
+tc_63cd9d2d, TypeS_3op>, Enc_5ab2be {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000110110;
@@ -2988,7 +3076,7 @@ def A4_round_rr_sat : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = round($Rs32,$Rt32):sat",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_14071773 {
+tc_63cd9d2d, TypeS_3op>, Enc_5ab2be {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000110110;
@@ -3001,7 +3089,7 @@ def A4_subp_c : HInst<
(outs DoubleRegs:$Rdd32, PredRegs:$Px4),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32, PredRegs:$Px4in),
"$Rdd32 = sub($Rss32,$Rtt32,$Px4):carry",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_151014 {
+tc_a87879e8, TypeS_3op>, Enc_2b3f60 {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000010111;
@@ -3012,7 +3100,7 @@ def A4_tfrcpp : HInst<
(outs DoubleRegs:$Rdd32),
(ins CtrRegs64:$Css32),
"$Rdd32 = $Css32",
-CR_tc_3x_SLOT3, TypeCR>, Enc_13094118 {
+tc_3b4892c6, TypeCR>, Enc_667b39 {
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b01101000000;
}
@@ -3020,7 +3108,7 @@ def A4_tfrpcp : HInst<
(outs CtrRegs64:$Cdd32),
(ins DoubleRegs:$Rss32),
"$Cdd32 = $Rss32",
-CR_tc_3x_SLOT3, TypeCR>, Enc_1329520 {
+tc_82f0f122, TypeCR>, Enc_0ed752 {
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b01100011001;
}
@@ -3028,7 +3116,7 @@ def A4_tlbmatch : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Pd4 = tlbmatch($Rss32,$Rt32)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_2492727 {
+tc_e2c08bb4, TypeALU64>, Enc_03833b {
let Inst{7-2} = 0b011000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b11010010000;
@@ -3038,7 +3126,7 @@ def A4_vcmpbeq_any : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Pd4 = any8(vcmpb.eq($Rss32,$Rtt32))",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_3831744 {
+tc_c58f771a, TypeALU64>, Enc_fcf7a7 {
let Inst{7-2} = 0b000000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b11010010000;
@@ -3047,7 +3135,7 @@ def A4_vcmpbeqi : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, u8_0Imm:$Ii),
"$Pd4 = vcmpb.eq($Rss32,#$Ii)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_13455308 {
+tc_5fa2857c, TypeALU64>, Enc_0d8adb {
let Inst{4-2} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11011100000;
@@ -3056,7 +3144,7 @@ def A4_vcmpbgt : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Pd4 = vcmpb.gt($Rss32,$Rtt32)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_3831744 {
+tc_c58f771a, TypeALU64>, Enc_fcf7a7 {
let Inst{7-2} = 0b010000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b11010010000;
@@ -3065,7 +3153,7 @@ def A4_vcmpbgti : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, s8_0Imm:$Ii),
"$Pd4 = vcmpb.gt($Rss32,#$Ii)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_13455308 {
+tc_5fa2857c, TypeALU64>, Enc_0d8adb {
let Inst{4-2} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11011100001;
@@ -3074,7 +3162,7 @@ def A4_vcmpbgtui : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, u7_0Imm:$Ii),
"$Pd4 = vcmpb.gtu($Rss32,#$Ii)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_2968094 {
+tc_5fa2857c, TypeALU64>, Enc_3680c2 {
let Inst{4-2} = 0b000;
let Inst{13-12} = 0b00;
let Inst{31-21} = 0b11011100010;
@@ -3083,7 +3171,7 @@ def A4_vcmpheqi : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, s8_0Imm:$Ii),
"$Pd4 = vcmph.eq($Rss32,#$Ii)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_13455308 {
+tc_5fa2857c, TypeALU64>, Enc_0d8adb {
let Inst{4-2} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11011100000;
@@ -3092,7 +3180,7 @@ def A4_vcmphgti : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, s8_0Imm:$Ii),
"$Pd4 = vcmph.gt($Rss32,#$Ii)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_13455308 {
+tc_5fa2857c, TypeALU64>, Enc_0d8adb {
let Inst{4-2} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11011100001;
@@ -3101,7 +3189,7 @@ def A4_vcmphgtui : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, u7_0Imm:$Ii),
"$Pd4 = vcmph.gtu($Rss32,#$Ii)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_2968094 {
+tc_5fa2857c, TypeALU64>, Enc_3680c2 {
let Inst{4-2} = 0b010;
let Inst{13-12} = 0b00;
let Inst{31-21} = 0b11011100010;
@@ -3110,7 +3198,7 @@ def A4_vcmpweqi : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, s8_0Imm:$Ii),
"$Pd4 = vcmpw.eq($Rss32,#$Ii)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_13455308 {
+tc_5fa2857c, TypeALU64>, Enc_0d8adb {
let Inst{4-2} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11011100000;
@@ -3119,7 +3207,7 @@ def A4_vcmpwgti : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, s8_0Imm:$Ii),
"$Pd4 = vcmpw.gt($Rss32,#$Ii)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_13455308 {
+tc_5fa2857c, TypeALU64>, Enc_0d8adb {
let Inst{4-2} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11011100001;
@@ -3128,7 +3216,7 @@ def A4_vcmpwgtui : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, u7_0Imm:$Ii),
"$Pd4 = vcmpw.gtu($Rss32,#$Ii)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_2968094 {
+tc_5fa2857c, TypeALU64>, Enc_3680c2 {
let Inst{4-2} = 0b100;
let Inst{13-12} = 0b00;
let Inst{31-21} = 0b11011100010;
@@ -3137,7 +3225,7 @@ def A4_vrmaxh : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Ru32),
"$Rxx32 = vrmaxh($Rss32,$Ru32)",
-S_3op_tc_3_SLOT23, TypeS_3op>, Enc_9773189 {
+tc_2aaab1e0, TypeS_3op>, Enc_412ff0 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001011001;
@@ -3148,7 +3236,7 @@ def A4_vrmaxuh : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Ru32),
"$Rxx32 = vrmaxuh($Rss32,$Ru32)",
-S_3op_tc_3_SLOT23, TypeS_3op>, Enc_9773189 {
+tc_2aaab1e0, TypeS_3op>, Enc_412ff0 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b11001011001;
@@ -3159,7 +3247,7 @@ def A4_vrmaxuw : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Ru32),
"$Rxx32 = vrmaxuw($Rss32,$Ru32)",
-S_3op_tc_3_SLOT23, TypeS_3op>, Enc_9773189 {
+tc_2aaab1e0, TypeS_3op>, Enc_412ff0 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b11001011001;
@@ -3170,7 +3258,7 @@ def A4_vrmaxw : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Ru32),
"$Rxx32 = vrmaxw($Rss32,$Ru32)",
-S_3op_tc_3_SLOT23, TypeS_3op>, Enc_9773189 {
+tc_2aaab1e0, TypeS_3op>, Enc_412ff0 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001011001;
@@ -3181,7 +3269,7 @@ def A4_vrminh : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Ru32),
"$Rxx32 = vrminh($Rss32,$Ru32)",
-S_3op_tc_3_SLOT23, TypeS_3op>, Enc_9773189 {
+tc_2aaab1e0, TypeS_3op>, Enc_412ff0 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001011001;
@@ -3192,7 +3280,7 @@ def A4_vrminuh : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Ru32),
"$Rxx32 = vrminuh($Rss32,$Ru32)",
-S_3op_tc_3_SLOT23, TypeS_3op>, Enc_9773189 {
+tc_2aaab1e0, TypeS_3op>, Enc_412ff0 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b11001011001;
@@ -3203,7 +3291,7 @@ def A4_vrminuw : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Ru32),
"$Rxx32 = vrminuw($Rss32,$Ru32)",
-S_3op_tc_3_SLOT23, TypeS_3op>, Enc_9773189 {
+tc_2aaab1e0, TypeS_3op>, Enc_412ff0 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b11001011001;
@@ -3214,7 +3302,7 @@ def A4_vrminw : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Ru32),
"$Rxx32 = vrminw($Rss32,$Ru32)",
-S_3op_tc_3_SLOT23, TypeS_3op>, Enc_9773189 {
+tc_2aaab1e0, TypeS_3op>, Enc_412ff0 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001011001;
@@ -3225,7 +3313,7 @@ def A5_ACS : HInst<
(outs DoubleRegs:$Rxx32, PredRegs:$Pe4),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32,$Pe4 = vacsh($Rss32,$Rtt32)",
-M_tc_3stall_SLOT23, TypeM>, Enc_12822813, Requires<[HasV55T]> {
+tc_ae0722f7, TypeM>, Enc_831a7d, Requires<[HasV55T]> {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010101;
@@ -3238,7 +3326,7 @@ def A5_vaddhubs : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rd32 = vaddhub($Rss32,$Rtt32):sat",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_9277990, Requires<[HasV5T]> {
+tc_63cd9d2d, TypeS_3op>, Enc_d2216a, Requires<[HasV5T]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000001010;
@@ -3251,7 +3339,7 @@ def A6_vminub_RdP : HInst<
(outs DoubleRegs:$Rdd32, PredRegs:$Pe4),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32,$Pe4 = vminub($Rtt32,$Rss32)",
-M_tc_2_SLOT23, TypeM>, Enc_766909, Requires<[HasV62T]> {
+tc_583510c7, TypeM>, Enc_d2c7f1, Requires<[HasV62T]> {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010111;
@@ -3262,7 +3350,7 @@ def C2_all8 : HInst<
(outs PredRegs:$Pd4),
(ins PredRegs:$Ps4),
"$Pd4 = all8($Ps4)",
-CR_tc_2early_SLOT23, TypeCR>, Enc_6975103 {
+tc_81a23d44, TypeCR>, Enc_65d691 {
let Inst{13-2} = 0b000000000000;
let Inst{31-18} = 0b01101011101000;
}
@@ -3270,7 +3358,7 @@ def C2_and : HInst<
(outs PredRegs:$Pd4),
(ins PredRegs:$Pt4, PredRegs:$Ps4),
"$Pd4 = and($Pt4,$Ps4)",
-CR_tc_2early_SLOT23, TypeCR>, Enc_8891794 {
+tc_d63b71d1, TypeCR>, Enc_454a26 {
let Inst{7-2} = 0b000000;
let Inst{13-10} = 0b0000;
let Inst{31-18} = 0b01101011000000;
@@ -3279,7 +3367,7 @@ def C2_andn : HInst<
(outs PredRegs:$Pd4),
(ins PredRegs:$Pt4, PredRegs:$Ps4),
"$Pd4 = and($Pt4,!$Ps4)",
-CR_tc_2early_SLOT23, TypeCR>, Enc_8891794 {
+tc_d63b71d1, TypeCR>, Enc_454a26 {
let Inst{7-2} = 0b000000;
let Inst{13-10} = 0b0000;
let Inst{31-18} = 0b01101011011000;
@@ -3288,7 +3376,7 @@ def C2_any8 : HInst<
(outs PredRegs:$Pd4),
(ins PredRegs:$Ps4),
"$Pd4 = any8($Ps4)",
-CR_tc_2early_SLOT23, TypeCR>, Enc_6975103 {
+tc_81a23d44, TypeCR>, Enc_65d691 {
let Inst{13-2} = 0b000000000000;
let Inst{31-18} = 0b01101011100000;
}
@@ -3296,7 +3384,7 @@ def C2_bitsclr : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Pd4 = bitsclr($Rs32,$Rt32)",
-S_3op_tc_2early_SLOT23, TypeS_3op>, Enc_10157519 {
+tc_c58f771a, TypeS_3op>, Enc_c2b48e {
let Inst{7-2} = 0b000000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000111100;
@@ -3305,7 +3393,7 @@ def C2_bitsclri : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, u6_0Imm:$Ii),
"$Pd4 = bitsclr($Rs32,#$Ii)",
-S_2op_tc_2early_SLOT23, TypeS_2op>, Enc_14574598 {
+tc_5fa2857c, TypeS_2op>, Enc_5d6c34 {
let Inst{7-2} = 0b000000;
let Inst{31-21} = 0b10000101100;
}
@@ -3313,7 +3401,7 @@ def C2_bitsset : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Pd4 = bitsset($Rs32,$Rt32)",
-S_3op_tc_2early_SLOT23, TypeS_3op>, Enc_10157519 {
+tc_c58f771a, TypeS_3op>, Enc_c2b48e {
let Inst{7-2} = 0b000000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000111010;
@@ -3322,7 +3410,7 @@ def C2_ccombinewf : HInst<
(outs DoubleRegs:$Rdd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32, IntRegs:$Rt32),
"if (!$Pu4) $Rdd32 = combine($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_8202458, PredNewRel {
+tc_1b6011fb, TypeALU32_3op>, Enc_cb4b4e, PredNewRel {
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11111101000;
@@ -3334,7 +3422,7 @@ def C2_ccombinewnewf : HInst<
(outs DoubleRegs:$Rdd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32, IntRegs:$Rt32),
"if (!$Pu4.new) $Rdd32 = combine($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_8202458, PredNewRel {
+tc_28d296df, TypeALU32_3op>, Enc_cb4b4e, PredNewRel {
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b11111101000;
@@ -3347,7 +3435,7 @@ def C2_ccombinewnewt : HInst<
(outs DoubleRegs:$Rdd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32, IntRegs:$Rt32),
"if ($Pu4.new) $Rdd32 = combine($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_8202458, PredNewRel {
+tc_28d296df, TypeALU32_3op>, Enc_cb4b4e, PredNewRel {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b11111101000;
@@ -3359,7 +3447,7 @@ def C2_ccombinewt : HInst<
(outs DoubleRegs:$Rdd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32, IntRegs:$Rt32),
"if ($Pu4) $Rdd32 = combine($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_8202458, PredNewRel {
+tc_1b6011fb, TypeALU32_3op>, Enc_cb4b4e, PredNewRel {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11111101000;
@@ -3370,7 +3458,7 @@ def C2_cmoveif : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, s32_0Imm:$Ii),
"if (!$Pu4) $Rd32 = #$Ii",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9487067, PredNewRel, ImmRegRel {
+tc_548f402d, TypeALU32_2op>, Enc_cda00a, PredNewRel, ImmRegRel {
let Inst{13-13} = 0b0;
let Inst{20-20} = 0b0;
let Inst{31-23} = 0b011111101;
@@ -3392,7 +3480,7 @@ def C2_cmoveit : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, s32_0Imm:$Ii),
"if ($Pu4) $Rd32 = #$Ii",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9487067, PredNewRel, ImmRegRel {
+tc_548f402d, TypeALU32_2op>, Enc_cda00a, PredNewRel, ImmRegRel {
let Inst{13-13} = 0b0;
let Inst{20-20} = 0b0;
let Inst{31-23} = 0b011111100;
@@ -3413,7 +3501,7 @@ def C2_cmovenewif : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, s32_0Imm:$Ii),
"if (!$Pu4.new) $Rd32 = #$Ii",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9487067, PredNewRel, ImmRegRel {
+tc_b08be45e, TypeALU32_2op>, Enc_cda00a, PredNewRel, ImmRegRel {
let Inst{13-13} = 0b1;
let Inst{20-20} = 0b0;
let Inst{31-23} = 0b011111101;
@@ -3436,7 +3524,7 @@ def C2_cmovenewit : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, s32_0Imm:$Ii),
"if ($Pu4.new) $Rd32 = #$Ii",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9487067, PredNewRel, ImmRegRel {
+tc_b08be45e, TypeALU32_2op>, Enc_cda00a, PredNewRel, ImmRegRel {
let Inst{13-13} = 0b1;
let Inst{20-20} = 0b0;
let Inst{31-23} = 0b011111100;
@@ -3458,7 +3546,7 @@ def C2_cmpeq : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Pd4 = cmp.eq($Rs32,$Rt32)",
-ALU32_3op_tc_2early_SLOT0123, TypeALU32_3op>, Enc_10157519, ImmRegRel {
+tc_5fe9fcd0, TypeALU32_3op>, Enc_c2b48e, ImmRegRel {
let Inst{7-2} = 0b000000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110010000;
@@ -3471,7 +3559,7 @@ def C2_cmpeqi : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, s32_0Imm:$Ii),
"$Pd4 = cmp.eq($Rs32,#$Ii)",
-ALU32_2op_tc_2early_SLOT0123, TypeALU32_2op>, Enc_16014536, ImmRegRel {
+tc_9df8b0dc, TypeALU32_2op>, Enc_bd0b33, ImmRegRel {
let Inst{4-2} = 0b000;
let Inst{31-22} = 0b0111010100;
let CextOpcode = "C2_cmpeq";
@@ -3487,7 +3575,7 @@ def C2_cmpeqp : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Pd4 = cmp.eq($Rss32,$Rtt32)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_3831744 {
+tc_c58f771a, TypeALU64>, Enc_fcf7a7 {
let Inst{7-2} = 0b000000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010010100;
@@ -3498,7 +3586,7 @@ def C2_cmpgei : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, s8_0Imm:$Ii),
"$Pd4 = cmp.ge($Rs32,#$Ii)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op> {
+tc_9df8b0dc, TypeALU32_2op> {
let isCompare = 1;
let isPseudo = 1;
}
@@ -3506,7 +3594,7 @@ def C2_cmpgeui : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, u8_0Imm:$Ii),
"$Pd4 = cmp.geu($Rs32,#$Ii)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op> {
+tc_9df8b0dc, TypeALU32_2op> {
let isCompare = 1;
let isPseudo = 1;
}
@@ -3514,7 +3602,7 @@ def C2_cmpgt : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Pd4 = cmp.gt($Rs32,$Rt32)",
-ALU32_3op_tc_2early_SLOT0123, TypeALU32_3op>, Enc_10157519, ImmRegRel {
+tc_5fe9fcd0, TypeALU32_3op>, Enc_c2b48e, ImmRegRel {
let Inst{7-2} = 0b000000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110010010;
@@ -3526,7 +3614,7 @@ def C2_cmpgti : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, s32_0Imm:$Ii),
"$Pd4 = cmp.gt($Rs32,#$Ii)",
-ALU32_2op_tc_2early_SLOT0123, TypeALU32_2op>, Enc_16014536, ImmRegRel {
+tc_9df8b0dc, TypeALU32_2op>, Enc_bd0b33, ImmRegRel {
let Inst{4-2} = 0b000;
let Inst{31-22} = 0b0111010101;
let CextOpcode = "C2_cmpgt";
@@ -3542,7 +3630,7 @@ def C2_cmpgtp : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Pd4 = cmp.gt($Rss32,$Rtt32)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_3831744 {
+tc_c58f771a, TypeALU64>, Enc_fcf7a7 {
let Inst{7-2} = 0b010000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010010100;
@@ -3552,7 +3640,7 @@ def C2_cmpgtu : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Pd4 = cmp.gtu($Rs32,$Rt32)",
-ALU32_3op_tc_2early_SLOT0123, TypeALU32_3op>, Enc_10157519, ImmRegRel {
+tc_5fe9fcd0, TypeALU32_3op>, Enc_c2b48e, ImmRegRel {
let Inst{7-2} = 0b000000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110010011;
@@ -3564,7 +3652,7 @@ def C2_cmpgtui : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, u32_0Imm:$Ii),
"$Pd4 = cmp.gtu($Rs32,#$Ii)",
-ALU32_2op_tc_2early_SLOT0123, TypeALU32_2op>, Enc_13249928, ImmRegRel {
+tc_9df8b0dc, TypeALU32_2op>, Enc_c0cdde, ImmRegRel {
let Inst{4-2} = 0b000;
let Inst{31-21} = 0b01110101100;
let CextOpcode = "C2_cmpgtu";
@@ -3580,7 +3668,7 @@ def C2_cmpgtup : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Pd4 = cmp.gtu($Rss32,$Rtt32)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_3831744 {
+tc_c58f771a, TypeALU64>, Enc_fcf7a7 {
let Inst{7-2} = 0b100000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010010100;
@@ -3590,7 +3678,7 @@ def C2_cmplt : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Pd4 = cmp.lt($Rs32,$Rt32)",
-PSEUDO, TypeALU32_3op> {
+tc_9df8b0dc, TypeALU32_3op> {
let isCompare = 1;
let isPseudo = 1;
let isCodeGenOnly = 1;
@@ -3599,7 +3687,7 @@ def C2_cmpltu : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Pd4 = cmp.ltu($Rs32,$Rt32)",
-PSEUDO, TypeALU32_3op> {
+tc_9df8b0dc, TypeALU32_3op> {
let isCompare = 1;
let isPseudo = 1;
let isCodeGenOnly = 1;
@@ -3608,7 +3696,7 @@ def C2_mask : HInst<
(outs DoubleRegs:$Rdd32),
(ins PredRegs:$Pt4),
"$Rdd32 = mask($Pt4)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_10328975 {
+tc_b86c7e8b, TypeS_2op>, Enc_78e566 {
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b0000;
let Inst{31-16} = 0b1000011000000000;
@@ -3617,7 +3705,7 @@ def C2_mux : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mux($Pu4,$Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_9626139 {
+tc_1b6011fb, TypeALU32_3op>, Enc_ea4c54 {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110100000;
@@ -3629,7 +3717,7 @@ def C2_muxii : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, s32_0Imm:$Ii, s8_0Imm:$II),
"$Rd32 = mux($Pu4,#$Ii,#$II)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9093094 {
+tc_1b6011fb, TypeALU32_2op>, Enc_830e5d {
let Inst{31-25} = 0b0111101;
let hasNewValue = 1;
let opNewValue = 0;
@@ -3643,7 +3731,7 @@ def C2_muxir : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32, s32_0Imm:$Ii),
"$Rd32 = mux($Pu4,$Rs32,#$Ii)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_10568534 {
+tc_1b6011fb, TypeALU32_2op>, Enc_e38e1f {
let Inst{13-13} = 0b0;
let Inst{31-23} = 0b011100110;
let hasNewValue = 1;
@@ -3659,7 +3747,7 @@ def C2_muxri : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, s32_0Imm:$Ii, IntRegs:$Rs32),
"$Rd32 = mux($Pu4,#$Ii,$Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_10568534 {
+tc_1b6011fb, TypeALU32_2op>, Enc_e38e1f {
let Inst{13-13} = 0b0;
let Inst{31-23} = 0b011100111;
let hasNewValue = 1;
@@ -3675,7 +3763,7 @@ def C2_not : HInst<
(outs PredRegs:$Pd4),
(ins PredRegs:$Ps4),
"$Pd4 = not($Ps4)",
-CR_tc_2early_SLOT23, TypeCR>, Enc_6975103 {
+tc_81a23d44, TypeCR>, Enc_65d691 {
let Inst{13-2} = 0b000000000000;
let Inst{31-18} = 0b01101011110000;
}
@@ -3683,7 +3771,7 @@ def C2_or : HInst<
(outs PredRegs:$Pd4),
(ins PredRegs:$Pt4, PredRegs:$Ps4),
"$Pd4 = or($Pt4,$Ps4)",
-CR_tc_2early_SLOT23, TypeCR>, Enc_8891794 {
+tc_d63b71d1, TypeCR>, Enc_454a26 {
let Inst{7-2} = 0b000000;
let Inst{13-10} = 0b0000;
let Inst{31-18} = 0b01101011001000;
@@ -3692,7 +3780,7 @@ def C2_orn : HInst<
(outs PredRegs:$Pd4),
(ins PredRegs:$Pt4, PredRegs:$Ps4),
"$Pd4 = or($Pt4,!$Ps4)",
-CR_tc_2early_SLOT23, TypeCR>, Enc_8891794 {
+tc_d63b71d1, TypeCR>, Enc_454a26 {
let Inst{7-2} = 0b000000;
let Inst{13-10} = 0b0000;
let Inst{31-18} = 0b01101011111000;
@@ -3701,7 +3789,7 @@ def C2_pxfer_map : HInst<
(outs PredRegs:$Pd4),
(ins PredRegs:$Ps4),
"$Pd4 = $Ps4",
-S_2op_tc_1_SLOT23, TypeMAPPING> {
+tc_d63b71d1, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -3709,7 +3797,7 @@ def C2_tfrpr : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Ps4),
"$Rd32 = $Ps4",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_11139981 {
+tc_b86c7e8b, TypeS_2op>, Enc_f5e933 {
let Inst{13-5} = 0b000000000;
let Inst{31-18} = 0b10001001010000;
let hasNewValue = 1;
@@ -3719,7 +3807,7 @@ def C2_tfrrp : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32),
"$Pd4 = $Rs32",
-S_2op_tc_2early_SLOT23, TypeS_2op>, Enc_4527648 {
+tc_47f0b7ad, TypeS_2op>, Enc_48b75f {
let Inst{13-2} = 0b000000000000;
let Inst{31-21} = 0b10000101010;
}
@@ -3727,18 +3815,19 @@ def C2_vitpack : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Ps4, PredRegs:$Pt4),
"$Rd32 = vitpack($Ps4,$Pt4)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_6735062 {
+tc_7ca2ea10, TypeS_2op>, Enc_527412 {
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b0000;
let Inst{31-18} = 0b10001001000000;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def C2_vmux : HInst<
(outs DoubleRegs:$Rdd32),
(ins PredRegs:$Pu4, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vmux($Pu4,$Rss32,$Rtt32)",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_7606379 {
+tc_d1b5a4b6, TypeALU64>, Enc_329361 {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010001000;
@@ -3747,7 +3836,7 @@ def C2_xor : HInst<
(outs PredRegs:$Pd4),
(ins PredRegs:$Ps4, PredRegs:$Pt4),
"$Pd4 = xor($Ps4,$Pt4)",
-CR_tc_2early_SLOT23, TypeCR>, Enc_8324216 {
+tc_d63b71d1, TypeCR>, Enc_284ebb {
let Inst{7-2} = 0b000000;
let Inst{13-10} = 0b0000;
let Inst{31-18} = 0b01101011010000;
@@ -3756,7 +3845,7 @@ def C4_addipc : HInst<
(outs IntRegs:$Rd32),
(ins u32_0Imm:$Ii),
"$Rd32 = add(pc,#$Ii)",
-CR_tc_2_SLOT3, TypeCR>, Enc_9554661 {
+tc_1fe8323c, TypeCR>, Enc_607661 {
let Inst{6-5} = 0b00;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0110101001001001;
@@ -3772,7 +3861,7 @@ def C4_and_and : HInst<
(outs PredRegs:$Pd4),
(ins PredRegs:$Ps4, PredRegs:$Pt4, PredRegs:$Pu4),
"$Pd4 = and($Ps4,and($Pt4,$Pu4))",
-CR_tc_2early_SLOT23, TypeCR>, Enc_4631106 {
+tc_43068634, TypeCR>, Enc_9ac432 {
let Inst{5-2} = 0b0000;
let Inst{13-10} = 0b0000;
let Inst{31-18} = 0b01101011000100;
@@ -3781,7 +3870,7 @@ def C4_and_andn : HInst<
(outs PredRegs:$Pd4),
(ins PredRegs:$Ps4, PredRegs:$Pt4, PredRegs:$Pu4),
"$Pd4 = and($Ps4,and($Pt4,!$Pu4))",
-CR_tc_2early_SLOT23, TypeCR>, Enc_4631106 {
+tc_43068634, TypeCR>, Enc_9ac432 {
let Inst{5-2} = 0b0000;
let Inst{13-10} = 0b0000;
let Inst{31-18} = 0b01101011100100;
@@ -3790,7 +3879,7 @@ def C4_and_or : HInst<
(outs PredRegs:$Pd4),
(ins PredRegs:$Ps4, PredRegs:$Pt4, PredRegs:$Pu4),
"$Pd4 = and($Ps4,or($Pt4,$Pu4))",
-CR_tc_2early_SLOT23, TypeCR>, Enc_4631106 {
+tc_43068634, TypeCR>, Enc_9ac432 {
let Inst{5-2} = 0b0000;
let Inst{13-10} = 0b0000;
let Inst{31-18} = 0b01101011001100;
@@ -3799,7 +3888,7 @@ def C4_and_orn : HInst<
(outs PredRegs:$Pd4),
(ins PredRegs:$Ps4, PredRegs:$Pt4, PredRegs:$Pu4),
"$Pd4 = and($Ps4,or($Pt4,!$Pu4))",
-CR_tc_2early_SLOT23, TypeCR>, Enc_4631106 {
+tc_43068634, TypeCR>, Enc_9ac432 {
let Inst{5-2} = 0b0000;
let Inst{13-10} = 0b0000;
let Inst{31-18} = 0b01101011101100;
@@ -3808,7 +3897,7 @@ def C4_cmplte : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Pd4 = !cmp.gt($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_10157519, ImmRegRel {
+tc_5fe9fcd0, TypeALU32_3op>, Enc_c2b48e, ImmRegRel {
let Inst{7-2} = 0b000100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110010010;
@@ -3820,7 +3909,7 @@ def C4_cmpltei : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, s32_0Imm:$Ii),
"$Pd4 = !cmp.gt($Rs32,#$Ii)",
-ALU32_2op_tc_2early_SLOT0123, TypeALU32_2op>, Enc_16014536, ImmRegRel {
+tc_9df8b0dc, TypeALU32_2op>, Enc_bd0b33, ImmRegRel {
let Inst{4-2} = 0b100;
let Inst{31-22} = 0b0111010101;
let CextOpcode = "C4_cmplte";
@@ -3836,7 +3925,7 @@ def C4_cmplteu : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Pd4 = !cmp.gtu($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_10157519, ImmRegRel {
+tc_5fe9fcd0, TypeALU32_3op>, Enc_c2b48e, ImmRegRel {
let Inst{7-2} = 0b000100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110010011;
@@ -3848,7 +3937,7 @@ def C4_cmplteui : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, u32_0Imm:$Ii),
"$Pd4 = !cmp.gtu($Rs32,#$Ii)",
-ALU32_2op_tc_2early_SLOT0123, TypeALU32_2op>, Enc_13249928, ImmRegRel {
+tc_9df8b0dc, TypeALU32_2op>, Enc_c0cdde, ImmRegRel {
let Inst{4-2} = 0b100;
let Inst{31-21} = 0b01110101100;
let CextOpcode = "C4_cmplteu";
@@ -3864,7 +3953,7 @@ def C4_cmpneq : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Pd4 = !cmp.eq($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_10157519, ImmRegRel {
+tc_5fe9fcd0, TypeALU32_3op>, Enc_c2b48e, ImmRegRel {
let Inst{7-2} = 0b000100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110010000;
@@ -3877,7 +3966,7 @@ def C4_cmpneqi : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, s32_0Imm:$Ii),
"$Pd4 = !cmp.eq($Rs32,#$Ii)",
-ALU32_2op_tc_2early_SLOT0123, TypeALU32_2op>, Enc_16014536, ImmRegRel {
+tc_9df8b0dc, TypeALU32_2op>, Enc_bd0b33, ImmRegRel {
let Inst{4-2} = 0b100;
let Inst{31-22} = 0b0111010100;
let CextOpcode = "C4_cmpneq";
@@ -3893,7 +3982,7 @@ def C4_fastcorner9 : HInst<
(outs PredRegs:$Pd4),
(ins PredRegs:$Ps4, PredRegs:$Pt4),
"$Pd4 = fastcorner9($Ps4,$Pt4)",
-CR_tc_2early_SLOT23, TypeCR>, Enc_8324216 {
+tc_d63b71d1, TypeCR>, Enc_284ebb {
let Inst{7-2} = 0b100100;
let Inst{13-10} = 0b1000;
let Inst{31-18} = 0b01101011000000;
@@ -3902,7 +3991,7 @@ def C4_fastcorner9_not : HInst<
(outs PredRegs:$Pd4),
(ins PredRegs:$Ps4, PredRegs:$Pt4),
"$Pd4 = !fastcorner9($Ps4,$Pt4)",
-CR_tc_2early_SLOT23, TypeCR>, Enc_8324216 {
+tc_d63b71d1, TypeCR>, Enc_284ebb {
let Inst{7-2} = 0b100100;
let Inst{13-10} = 0b1000;
let Inst{31-18} = 0b01101011000100;
@@ -3911,7 +4000,7 @@ def C4_nbitsclr : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Pd4 = !bitsclr($Rs32,$Rt32)",
-S_3op_tc_2early_SLOT23, TypeS_3op>, Enc_10157519 {
+tc_c58f771a, TypeS_3op>, Enc_c2b48e {
let Inst{7-2} = 0b000000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000111101;
@@ -3920,7 +4009,7 @@ def C4_nbitsclri : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, u6_0Imm:$Ii),
"$Pd4 = !bitsclr($Rs32,#$Ii)",
-S_2op_tc_2early_SLOT23, TypeS_2op>, Enc_14574598 {
+tc_5fa2857c, TypeS_2op>, Enc_5d6c34 {
let Inst{7-2} = 0b000000;
let Inst{31-21} = 0b10000101101;
}
@@ -3928,7 +4017,7 @@ def C4_nbitsset : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Pd4 = !bitsset($Rs32,$Rt32)",
-S_3op_tc_2early_SLOT23, TypeS_3op>, Enc_10157519 {
+tc_c58f771a, TypeS_3op>, Enc_c2b48e {
let Inst{7-2} = 0b000000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000111011;
@@ -3937,7 +4026,7 @@ def C4_or_and : HInst<
(outs PredRegs:$Pd4),
(ins PredRegs:$Ps4, PredRegs:$Pt4, PredRegs:$Pu4),
"$Pd4 = or($Ps4,and($Pt4,$Pu4))",
-CR_tc_2early_SLOT23, TypeCR>, Enc_4631106 {
+tc_43068634, TypeCR>, Enc_9ac432 {
let Inst{5-2} = 0b0000;
let Inst{13-10} = 0b0000;
let Inst{31-18} = 0b01101011010100;
@@ -3946,7 +4035,7 @@ def C4_or_andn : HInst<
(outs PredRegs:$Pd4),
(ins PredRegs:$Ps4, PredRegs:$Pt4, PredRegs:$Pu4),
"$Pd4 = or($Ps4,and($Pt4,!$Pu4))",
-CR_tc_2early_SLOT23, TypeCR>, Enc_4631106 {
+tc_43068634, TypeCR>, Enc_9ac432 {
let Inst{5-2} = 0b0000;
let Inst{13-10} = 0b0000;
let Inst{31-18} = 0b01101011110100;
@@ -3955,7 +4044,7 @@ def C4_or_or : HInst<
(outs PredRegs:$Pd4),
(ins PredRegs:$Ps4, PredRegs:$Pt4, PredRegs:$Pu4),
"$Pd4 = or($Ps4,or($Pt4,$Pu4))",
-CR_tc_2early_SLOT23, TypeCR>, Enc_4631106 {
+tc_43068634, TypeCR>, Enc_9ac432 {
let Inst{5-2} = 0b0000;
let Inst{13-10} = 0b0000;
let Inst{31-18} = 0b01101011011100;
@@ -3964,7 +4053,7 @@ def C4_or_orn : HInst<
(outs PredRegs:$Pd4),
(ins PredRegs:$Ps4, PredRegs:$Pt4, PredRegs:$Pu4),
"$Pd4 = or($Ps4,or($Pt4,!$Pu4))",
-CR_tc_2early_SLOT23, TypeCR>, Enc_4631106 {
+tc_43068634, TypeCR>, Enc_9ac432 {
let Inst{5-2} = 0b0000;
let Inst{13-10} = 0b0000;
let Inst{31-18} = 0b01101011111100;
@@ -3973,319 +4062,293 @@ def F2_conv_d2df : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32),
"$Rdd32 = convert_d2df($Rss32)",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_13133231, Requires<[HasV5T]> {
+tc_e836c161, TypeS_2op>, Enc_b9c5fb, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000011;
let Inst{31-21} = 0b10000000111;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
}
def F2_conv_d2sf : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32),
"$Rd32 = convert_d2sf($Rss32)",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_3742184, Requires<[HasV5T]> {
+tc_e836c161, TypeS_2op>, Enc_90cd8b, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000001;
let Inst{31-21} = 0b10001000010;
let hasNewValue = 1;
let opNewValue = 0;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
}
def F2_conv_df2d : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32),
"$Rdd32 = convert_df2d($Rss32)",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_13133231, Requires<[HasV5T]> {
+tc_e836c161, TypeS_2op>, Enc_b9c5fb, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b10000000111;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
}
def F2_conv_df2d_chop : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32),
"$Rdd32 = convert_df2d($Rss32):chop",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_13133231, Requires<[HasV5T]> {
+tc_e836c161, TypeS_2op>, Enc_b9c5fb, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000110;
let Inst{31-21} = 0b10000000111;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
}
def F2_conv_df2sf : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32),
"$Rd32 = convert_df2sf($Rss32)",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_3742184, Requires<[HasV5T]> {
+tc_e836c161, TypeS_2op>, Enc_90cd8b, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000001;
let Inst{31-21} = 0b10001000000;
let hasNewValue = 1;
let opNewValue = 0;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
}
def F2_conv_df2ud : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32),
"$Rdd32 = convert_df2ud($Rss32)",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_13133231, Requires<[HasV5T]> {
+tc_e836c161, TypeS_2op>, Enc_b9c5fb, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000001;
let Inst{31-21} = 0b10000000111;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
}
def F2_conv_df2ud_chop : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32),
"$Rdd32 = convert_df2ud($Rss32):chop",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_13133231, Requires<[HasV5T]> {
+tc_e836c161, TypeS_2op>, Enc_b9c5fb, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000111;
let Inst{31-21} = 0b10000000111;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
}
def F2_conv_df2uw : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32),
"$Rd32 = convert_df2uw($Rss32)",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_3742184, Requires<[HasV5T]> {
+tc_e836c161, TypeS_2op>, Enc_90cd8b, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000001;
let Inst{31-21} = 0b10001000011;
let hasNewValue = 1;
let opNewValue = 0;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
}
def F2_conv_df2uw_chop : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32),
"$Rd32 = convert_df2uw($Rss32):chop",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_3742184, Requires<[HasV5T]> {
+tc_e836c161, TypeS_2op>, Enc_90cd8b, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000001;
let Inst{31-21} = 0b10001000101;
let hasNewValue = 1;
let opNewValue = 0;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
}
def F2_conv_df2w : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32),
"$Rd32 = convert_df2w($Rss32)",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_3742184, Requires<[HasV5T]> {
+tc_e836c161, TypeS_2op>, Enc_90cd8b, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000001;
let Inst{31-21} = 0b10001000100;
let hasNewValue = 1;
let opNewValue = 0;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
}
def F2_conv_df2w_chop : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32),
"$Rd32 = convert_df2w($Rss32):chop",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_3742184, Requires<[HasV5T]> {
+tc_e836c161, TypeS_2op>, Enc_90cd8b, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000001;
let Inst{31-21} = 0b10001000111;
let hasNewValue = 1;
let opNewValue = 0;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
}
def F2_conv_sf2d : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32),
"$Rdd32 = convert_sf2d($Rs32)",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_4030179, Requires<[HasV5T]> {
+tc_e836c161, TypeS_2op>, Enc_3a3d62, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000100;
let Inst{31-21} = 0b10000100100;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
}
def F2_conv_sf2d_chop : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32),
"$Rdd32 = convert_sf2d($Rs32):chop",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_4030179, Requires<[HasV5T]> {
+tc_e836c161, TypeS_2op>, Enc_3a3d62, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000110;
let Inst{31-21} = 0b10000100100;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
}
def F2_conv_sf2df : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32),
"$Rdd32 = convert_sf2df($Rs32)",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_4030179, Requires<[HasV5T]> {
+tc_e836c161, TypeS_2op>, Enc_3a3d62, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b10000100100;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
}
def F2_conv_sf2ud : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32),
"$Rdd32 = convert_sf2ud($Rs32)",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_4030179, Requires<[HasV5T]> {
+tc_e836c161, TypeS_2op>, Enc_3a3d62, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000011;
let Inst{31-21} = 0b10000100100;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
}
def F2_conv_sf2ud_chop : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32),
"$Rdd32 = convert_sf2ud($Rs32):chop",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_4030179, Requires<[HasV5T]> {
+tc_e836c161, TypeS_2op>, Enc_3a3d62, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000101;
let Inst{31-21} = 0b10000100100;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
}
def F2_conv_sf2uw : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = convert_sf2uw($Rs32)",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_4075554, Requires<[HasV5T]> {
+tc_e836c161, TypeS_2op>, Enc_5e2823, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b10001011011;
let hasNewValue = 1;
let opNewValue = 0;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
}
def F2_conv_sf2uw_chop : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = convert_sf2uw($Rs32):chop",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_4075554, Requires<[HasV5T]> {
+tc_e836c161, TypeS_2op>, Enc_5e2823, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000001;
let Inst{31-21} = 0b10001011011;
let hasNewValue = 1;
let opNewValue = 0;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
}
def F2_conv_sf2w : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = convert_sf2w($Rs32)",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_4075554, Requires<[HasV5T]> {
+tc_e836c161, TypeS_2op>, Enc_5e2823, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b10001011100;
let hasNewValue = 1;
let opNewValue = 0;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
}
def F2_conv_sf2w_chop : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = convert_sf2w($Rs32):chop",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_4075554, Requires<[HasV5T]> {
+tc_e836c161, TypeS_2op>, Enc_5e2823, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000001;
let Inst{31-21} = 0b10001011100;
let hasNewValue = 1;
let opNewValue = 0;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
}
def F2_conv_ud2df : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32),
"$Rdd32 = convert_ud2df($Rss32)",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_13133231, Requires<[HasV5T]> {
+tc_e836c161, TypeS_2op>, Enc_b9c5fb, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000010;
let Inst{31-21} = 0b10000000111;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
}
def F2_conv_ud2sf : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32),
"$Rd32 = convert_ud2sf($Rss32)",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_3742184, Requires<[HasV5T]> {
+tc_e836c161, TypeS_2op>, Enc_90cd8b, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000001;
let Inst{31-21} = 0b10001000001;
let hasNewValue = 1;
let opNewValue = 0;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
}
def F2_conv_uw2df : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32),
"$Rdd32 = convert_uw2df($Rs32)",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_4030179, Requires<[HasV5T]> {
+tc_e836c161, TypeS_2op>, Enc_3a3d62, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000001;
let Inst{31-21} = 0b10000100100;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
}
def F2_conv_uw2sf : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = convert_uw2sf($Rs32)",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_4075554, Requires<[HasV5T]> {
+tc_e836c161, TypeS_2op>, Enc_5e2823, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b10001011001;
let hasNewValue = 1;
let opNewValue = 0;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
}
def F2_conv_w2df : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32),
"$Rdd32 = convert_w2df($Rs32)",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_4030179, Requires<[HasV5T]> {
+tc_e836c161, TypeS_2op>, Enc_3a3d62, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000010;
let Inst{31-21} = 0b10000100100;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
}
def F2_conv_w2sf : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = convert_w2sf($Rs32)",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_4075554, Requires<[HasV5T]> {
+tc_e836c161, TypeS_2op>, Enc_5e2823, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b10001011010;
let hasNewValue = 1;
let opNewValue = 0;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
}
def F2_dfclass : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, u5_0Imm:$Ii),
"$Pd4 = dfclass($Rss32,#$Ii)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_14400220, Requires<[HasV5T]> {
+tc_5fa2857c, TypeALU64>, Enc_1f19b5, Requires<[HasV5T]> {
let Inst{4-2} = 0b100;
let Inst{13-10} = 0b0000;
let Inst{31-21} = 0b11011100100;
@@ -4296,7 +4359,7 @@ def F2_dfcmpeq : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Pd4 = dfcmp.eq($Rss32,$Rtt32)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_3831744, Requires<[HasV5T]> {
+tc_c58f771a, TypeALU64>, Enc_fcf7a7, Requires<[HasV5T]> {
let Inst{7-2} = 0b000000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010010111;
@@ -4308,7 +4371,7 @@ def F2_dfcmpge : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Pd4 = dfcmp.ge($Rss32,$Rtt32)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_3831744, Requires<[HasV5T]> {
+tc_c58f771a, TypeALU64>, Enc_fcf7a7, Requires<[HasV5T]> {
let Inst{7-2} = 0b010000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010010111;
@@ -4320,7 +4383,7 @@ def F2_dfcmpgt : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Pd4 = dfcmp.gt($Rss32,$Rtt32)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_3831744, Requires<[HasV5T]> {
+tc_c58f771a, TypeALU64>, Enc_fcf7a7, Requires<[HasV5T]> {
let Inst{7-2} = 0b001000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010010111;
@@ -4332,7 +4395,7 @@ def F2_dfcmpuo : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Pd4 = dfcmp.uo($Rss32,$Rtt32)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_3831744, Requires<[HasV5T]> {
+tc_c58f771a, TypeALU64>, Enc_fcf7a7, Requires<[HasV5T]> {
let Inst{7-2} = 0b011000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010010111;
@@ -4344,7 +4407,7 @@ def F2_dfimm_n : HInst<
(outs DoubleRegs:$Rdd32),
(ins u10_0Imm:$Ii),
"$Rdd32 = dfmake(#$Ii):neg",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_2702036, Requires<[HasV5T]> {
+tc_485bb57c, TypeALU64>, Enc_e6c957, Requires<[HasV5T]> {
let Inst{20-16} = 0b00000;
let Inst{31-22} = 0b1101100101;
let prefersSlot3 = 1;
@@ -4353,7 +4416,7 @@ def F2_dfimm_p : HInst<
(outs DoubleRegs:$Rdd32),
(ins u10_0Imm:$Ii),
"$Rdd32 = dfmake(#$Ii):pos",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_2702036, Requires<[HasV5T]> {
+tc_485bb57c, TypeALU64>, Enc_e6c957, Requires<[HasV5T]> {
let Inst{20-16} = 0b00000;
let Inst{31-22} = 0b1101100100;
let prefersSlot3 = 1;
@@ -4362,14 +4425,13 @@ def F2_sfadd : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = sfadd($Rs32,$Rt32)",
-M_tc_3or4x_SLOT23, TypeM>, Enc_14071773, Requires<[HasV5T]> {
+tc_3bea1824, TypeM>, Enc_5ab2be, Requires<[HasV5T]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101011000;
let hasNewValue = 1;
let opNewValue = 0;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
let isCommutable = 1;
}
@@ -4377,7 +4439,7 @@ def F2_sfclass : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, u5_0Imm:$Ii),
"$Pd4 = sfclass($Rs32,#$Ii)",
-S_2op_tc_2early_SLOT23, TypeS_2op>, Enc_2103742, Requires<[HasV5T]> {
+tc_5fa2857c, TypeS_2op>, Enc_83ee64, Requires<[HasV5T]> {
let Inst{7-2} = 0b000000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10000101111;
@@ -4388,7 +4450,7 @@ def F2_sfcmpeq : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Pd4 = sfcmp.eq($Rs32,$Rt32)",
-ALU64_tc_2early_SLOT23, TypeS_3op>, Enc_10157519, Requires<[HasV5T]> {
+tc_c58f771a, TypeS_3op>, Enc_c2b48e, Requires<[HasV5T]> {
let Inst{7-2} = 0b011000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000111111;
@@ -4400,7 +4462,7 @@ def F2_sfcmpge : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Pd4 = sfcmp.ge($Rs32,$Rt32)",
-ALU64_tc_2early_SLOT23, TypeS_3op>, Enc_10157519, Requires<[HasV5T]> {
+tc_c58f771a, TypeS_3op>, Enc_c2b48e, Requires<[HasV5T]> {
let Inst{7-2} = 0b000000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000111111;
@@ -4412,7 +4474,7 @@ def F2_sfcmpgt : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Pd4 = sfcmp.gt($Rs32,$Rt32)",
-ALU64_tc_2early_SLOT23, TypeS_3op>, Enc_10157519, Requires<[HasV5T]> {
+tc_c58f771a, TypeS_3op>, Enc_c2b48e, Requires<[HasV5T]> {
let Inst{7-2} = 0b100000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000111111;
@@ -4424,7 +4486,7 @@ def F2_sfcmpuo : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Pd4 = sfcmp.uo($Rs32,$Rt32)",
-ALU64_tc_2early_SLOT23, TypeS_3op>, Enc_10157519, Requires<[HasV5T]> {
+tc_c58f771a, TypeS_3op>, Enc_c2b48e, Requires<[HasV5T]> {
let Inst{7-2} = 0b001000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000111111;
@@ -4436,52 +4498,48 @@ def F2_sffixupd : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = sffixupd($Rs32,$Rt32)",
-M_tc_3or4x_SLOT23, TypeM>, Enc_14071773, Requires<[HasV5T]> {
+tc_3bea1824, TypeM>, Enc_5ab2be, Requires<[HasV5T]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101011110;
let hasNewValue = 1;
let opNewValue = 0;
let isFP = 1;
-let prefersSlot3 = 1;
}
def F2_sffixupn : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = sffixupn($Rs32,$Rt32)",
-M_tc_3or4x_SLOT23, TypeM>, Enc_14071773, Requires<[HasV5T]> {
+tc_3bea1824, TypeM>, Enc_5ab2be, Requires<[HasV5T]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101011110;
let hasNewValue = 1;
let opNewValue = 0;
let isFP = 1;
-let prefersSlot3 = 1;
}
def F2_sffixupr : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = sffixupr($Rs32)",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_4075554, Requires<[HasV5T]> {
+tc_e836c161, TypeS_2op>, Enc_5e2823, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b10001011101;
let hasNewValue = 1;
let opNewValue = 0;
let isFP = 1;
-let prefersSlot3 = 1;
}
def F2_sffma : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += sfmpy($Rs32,$Rt32)",
-M_tc_3or4x_acc_SLOT23, TypeM>, Enc_9223889, Requires<[HasV5T]> {
+tc_2d1e6f5c, TypeM>, Enc_2ae154, Requires<[HasV5T]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101111000;
let hasNewValue = 1;
let opNewValue = 0;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
let Constraints = "$Rx32 = $Rx32in";
}
@@ -4489,14 +4547,13 @@ def F2_sffma_lib : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += sfmpy($Rs32,$Rt32):lib",
-M_tc_3or4x_acc_SLOT23, TypeM>, Enc_9223889, Requires<[HasV5T]> {
+tc_2d1e6f5c, TypeM>, Enc_2ae154, Requires<[HasV5T]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101111000;
let hasNewValue = 1;
let opNewValue = 0;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
let Constraints = "$Rx32 = $Rx32in";
}
@@ -4504,14 +4561,13 @@ def F2_sffma_sc : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32, PredRegs:$Pu4),
"$Rx32 += sfmpy($Rs32,$Rt32,$Pu4):scale",
-M_tc_3or4x_acc_SLOT23, TypeM>, Enc_15194851, Requires<[HasV5T]> {
+tc_2e55aa16, TypeM>, Enc_437f33, Requires<[HasV5T]> {
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101111011;
let hasNewValue = 1;
let opNewValue = 0;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
let Constraints = "$Rx32 = $Rx32in";
}
@@ -4519,14 +4575,13 @@ def F2_sffms : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= sfmpy($Rs32,$Rt32)",
-M_tc_3or4x_acc_SLOT23, TypeM>, Enc_9223889, Requires<[HasV5T]> {
+tc_2d1e6f5c, TypeM>, Enc_2ae154, Requires<[HasV5T]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101111000;
let hasNewValue = 1;
let opNewValue = 0;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
let Constraints = "$Rx32 = $Rx32in";
}
@@ -4534,14 +4589,13 @@ def F2_sffms_lib : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= sfmpy($Rs32,$Rt32):lib",
-M_tc_3or4x_acc_SLOT23, TypeM>, Enc_9223889, Requires<[HasV5T]> {
+tc_2d1e6f5c, TypeM>, Enc_2ae154, Requires<[HasV5T]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101111000;
let hasNewValue = 1;
let opNewValue = 0;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
let Constraints = "$Rx32 = $Rx32in";
}
@@ -4549,7 +4603,7 @@ def F2_sfimm_n : HInst<
(outs IntRegs:$Rd32),
(ins u10_0Imm:$Ii),
"$Rd32 = sfmake(#$Ii):neg",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_9082775, Requires<[HasV5T]> {
+tc_485bb57c, TypeALU64>, Enc_6c9440, Requires<[HasV5T]> {
let Inst{20-16} = 0b00000;
let Inst{31-22} = 0b1101011001;
let hasNewValue = 1;
@@ -4560,7 +4614,7 @@ def F2_sfimm_p : HInst<
(outs IntRegs:$Rd32),
(ins u10_0Imm:$Ii),
"$Rd32 = sfmake(#$Ii):pos",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_9082775, Requires<[HasV5T]> {
+tc_485bb57c, TypeALU64>, Enc_6c9440, Requires<[HasV5T]> {
let Inst{20-16} = 0b00000;
let Inst{31-22} = 0b1101011000;
let hasNewValue = 1;
@@ -4571,20 +4625,19 @@ def F2_sfinvsqrta : HInst<
(outs IntRegs:$Rd32, PredRegs:$Pe4),
(ins IntRegs:$Rs32),
"$Rd32,$Pe4 = sfinvsqrta($Rs32)",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_5718302, Requires<[HasV5T]> {
+tc_f1aa2cdb, TypeS_2op>, Enc_890909, Requires<[HasV5T]> {
let Inst{13-7} = 0b0000000;
let Inst{31-21} = 0b10001011111;
let hasNewValue = 1;
let opNewValue = 0;
let isFP = 1;
let isPredicateLate = 1;
-let prefersSlot3 = 1;
}
def F2_sfmax : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = sfmax($Rs32,$Rt32)",
-M_tc_2_SLOT23, TypeM>, Enc_14071773, Requires<[HasV5T]> {
+tc_f1240c08, TypeM>, Enc_5ab2be, Requires<[HasV5T]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101011100;
@@ -4598,7 +4651,7 @@ def F2_sfmin : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = sfmin($Rs32,$Rt32)",
-M_tc_2_SLOT23, TypeM>, Enc_14071773, Requires<[HasV5T]> {
+tc_f1240c08, TypeM>, Enc_5ab2be, Requires<[HasV5T]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101011100;
@@ -4612,14 +4665,13 @@ def F2_sfmpy : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = sfmpy($Rs32,$Rt32)",
-M_tc_3or4x_SLOT23, TypeM>, Enc_14071773, Requires<[HasV5T]> {
+tc_3bea1824, TypeM>, Enc_5ab2be, Requires<[HasV5T]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101011010;
let hasNewValue = 1;
let opNewValue = 0;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
let isCommutable = 1;
}
@@ -4627,7 +4679,7 @@ def F2_sfrecipa : HInst<
(outs IntRegs:$Rd32, PredRegs:$Pe4),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32,$Pe4 = sfrecipa($Rs32,$Rt32)",
-M_tc_3or4x_SLOT23, TypeM>, Enc_5853469, Requires<[HasV5T]> {
+tc_09c86199, TypeM>, Enc_a94f3b, Requires<[HasV5T]> {
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101011111;
@@ -4635,27 +4687,25 @@ let hasNewValue = 1;
let opNewValue = 0;
let isFP = 1;
let isPredicateLate = 1;
-let prefersSlot3 = 1;
}
def F2_sfsub : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = sfsub($Rs32,$Rt32)",
-M_tc_3or4x_SLOT23, TypeM>, Enc_14071773, Requires<[HasV5T]> {
+tc_3bea1824, TypeM>, Enc_5ab2be, Requires<[HasV5T]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101011000;
let hasNewValue = 1;
let opNewValue = 0;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
}
def J2_call : HInst<
(outs),
(ins a30_2Imm:$Ii),
"call $Ii",
-J_tc_2early_SLOT23, TypeJ>, Enc_13453446, PredRel {
+tc_639d93ee, TypeJ>, Enc_81ac1d, PredRel {
let Inst{0-0} = 0b0;
let Inst{31-25} = 0b0101101;
let isCall = 1;
@@ -4675,7 +4725,7 @@ def J2_callf : HInst<
(outs),
(ins PredRegs:$Pu4, a30_2Imm:$Ii),
"if (!$Pu4) call $Ii",
-J_tc_2early_SLOT23, TypeJ>, Enc_14868535, PredRel {
+tc_0767081f, TypeJ>, Enc_daea09, PredRel {
let Inst{0-0} = 0b0;
let Inst{12-10} = 0b000;
let Inst{21-21} = 0b1;
@@ -4699,7 +4749,7 @@ def J2_callr : HInst<
(outs),
(ins IntRegs:$Rs32),
"callr $Rs32",
-J_tc_2early_SLOT2, TypeJ>, Enc_11704059 {
+tc_ecfaae86, TypeJ>, Enc_ecbcc8 {
let Inst{13-0} = 0b00000000000000;
let Inst{31-21} = 0b01010000101;
let cofMax1 = 1;
@@ -4713,7 +4763,7 @@ def J2_callrf : HInst<
(outs),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if (!$Pu4) callr $Rs32",
-J_tc_2early_SLOT2, TypeJ>, Enc_1928953 {
+tc_84630363, TypeJ>, Enc_88d4d9 {
let Inst{7-0} = 0b00000000;
let Inst{13-10} = 0b0000;
let Inst{31-21} = 0b01010001001;
@@ -4731,7 +4781,7 @@ def J2_callrt : HInst<
(outs),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if ($Pu4) callr $Rs32",
-J_tc_2early_SLOT2, TypeJ>, Enc_1928953 {
+tc_84630363, TypeJ>, Enc_88d4d9 {
let Inst{7-0} = 0b00000000;
let Inst{13-10} = 0b0000;
let Inst{31-21} = 0b01010001000;
@@ -4748,7 +4798,7 @@ def J2_callt : HInst<
(outs),
(ins PredRegs:$Pu4, a30_2Imm:$Ii),
"if ($Pu4) call $Ii",
-J_tc_2early_SLOT23, TypeJ>, Enc_14868535, PredRel {
+tc_0767081f, TypeJ>, Enc_daea09, PredRel {
let Inst{0-0} = 0b0;
let Inst{12-10} = 0b000;
let Inst{21-21} = 0b0;
@@ -4771,16 +4821,18 @@ def J2_endloop0 : HInst<
(outs),
(ins),
"endloop0",
-PSEUDO, TypeJ> {
+tc_aad55963, TypeJ> {
let Uses = [LC0, SA0];
let Defs = [LC0, P3, PC, USR];
+let isBranch = 1;
+let isTerminator = 1;
let isPseudo = 1;
}
def J2_endloop01 : HInst<
(outs),
(ins),
"endloop01",
-PSEUDO, TypeJ> {
+tc_aad55963, TypeJ> {
let Uses = [LC0, LC1, SA0, SA1];
let Defs = [LC0, LC1, P3, PC, USR];
let isPseudo = 1;
@@ -4789,16 +4841,18 @@ def J2_endloop1 : HInst<
(outs),
(ins),
"endloop1",
-PSEUDO, TypeJ> {
+tc_aad55963, TypeJ> {
let Uses = [LC1, SA1];
let Defs = [LC1, PC];
+let isBranch = 1;
+let isTerminator = 1;
let isPseudo = 1;
}
def J2_jump : HInst<
(outs),
(ins b30_2Imm:$Ii),
"jump $Ii",
-J_tc_2early_CJUMP_UCJUMP_ARCHDEPSLOT, TypeJ>, Enc_13453446, PredNewRel {
+tc_a333d2a9, TypeJ>, Enc_81ac1d, PredNewRel {
let Inst{0-0} = 0b0;
let Inst{31-25} = 0b0101100;
let isTerminator = 1;
@@ -4818,7 +4872,7 @@ def J2_jumpf : HInst<
(outs),
(ins PredRegs:$Pu4, b30_2Imm:$Ii),
"if (!$Pu4) jump:nt $Ii",
-J_tc_2early_CJUMP_UCJUMP_ARCHDEPSLOT, TypeJ>, Enc_14868535, PredNewRel {
+tc_1b834fe7, TypeJ>, Enc_daea09, PredNewRel {
let Inst{0-0} = 0b0;
let Inst{12-10} = 0b000;
let Inst{21-21} = 0b1;
@@ -4841,7 +4895,7 @@ def J2_jumpf_nopred_map : HInst<
(outs),
(ins PredRegs:$Pu4, b15_2Imm:$Ii),
"if (!$Pu4) jump $Ii",
-PSEUDO, TypeMAPPING>, Requires<[HasV60T]> {
+tc_1b834fe7, TypeMAPPING>, Requires<[HasV60T]> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -4849,7 +4903,7 @@ def J2_jumpfnew : HInst<
(outs),
(ins PredRegs:$Pu4, b30_2Imm:$Ii),
"if (!$Pu4.new) jump:nt $Ii",
-J_tc_2early_CJUMP_UCJUMP_ARCHDEPSLOT, TypeJ>, Enc_14868535, PredNewRel {
+tc_537e2013, TypeJ>, Enc_daea09, PredNewRel {
let Inst{0-0} = 0b0;
let Inst{12-10} = 0b010;
let Inst{21-21} = 0b1;
@@ -4873,7 +4927,7 @@ def J2_jumpfnewpt : HInst<
(outs),
(ins PredRegs:$Pu4, b30_2Imm:$Ii),
"if (!$Pu4.new) jump:t $Ii",
-J_tc_2early_CJUMP_UCJUMP_ARCHDEPSLOT, TypeJ>, Enc_14868535, PredNewRel {
+tc_537e2013, TypeJ>, Enc_daea09, PredNewRel {
let Inst{0-0} = 0b0;
let Inst{12-10} = 0b110;
let Inst{21-21} = 0b1;
@@ -4897,7 +4951,7 @@ def J2_jumpfpt : HInst<
(outs),
(ins PredRegs:$Pu4, b30_2Imm:$Ii),
"if (!$Pu4) jump:t $Ii",
-J_tc_2early_CJUMP_UCJUMP_ARCHDEPSLOT, TypeJ>, Enc_14868535, Requires<[HasV60T]>, PredNewRel {
+tc_b5bfaa60, TypeJ>, Enc_daea09, Requires<[HasV60T]>, PredNewRel {
let Inst{0-0} = 0b0;
let Inst{12-10} = 0b100;
let Inst{21-21} = 0b1;
@@ -4920,7 +4974,7 @@ def J2_jumpr : HInst<
(outs),
(ins IntRegs:$Rs32),
"jumpr $Rs32",
-J_tc_2early_SLOT2, TypeJ>, Enc_11704059, PredNewRel {
+tc_b08b653e, TypeJ>, Enc_ecbcc8, PredNewRel {
let Inst{13-0} = 0b00000000000000;
let Inst{31-21} = 0b01010010100;
let isTerminator = 1;
@@ -4937,7 +4991,7 @@ def J2_jumprf : HInst<
(outs),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if (!$Pu4) jumpr:nt $Rs32",
-J_tc_2early_SLOT2, TypeJ>, Enc_1928953, PredNewRel {
+tc_07ac815d, TypeJ>, Enc_88d4d9, PredNewRel {
let Inst{7-0} = 0b00000000;
let Inst{13-10} = 0b0000;
let Inst{31-21} = 0b01010011011;
@@ -4956,7 +5010,7 @@ def J2_jumprf_nopred_map : HInst<
(outs),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if (!$Pu4) jumpr $Rs32",
-PSEUDO, TypeMAPPING>, Requires<[HasV60T]> {
+tc_07ac815d, TypeMAPPING>, Requires<[HasV60T]> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -4964,7 +5018,7 @@ def J2_jumprfnew : HInst<
(outs),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if (!$Pu4.new) jumpr:nt $Rs32",
-J_tc_2early_SLOT2, TypeJ>, Enc_1928953, PredNewRel {
+tc_1f9668cc, TypeJ>, Enc_88d4d9, PredNewRel {
let Inst{7-0} = 0b00000000;
let Inst{13-10} = 0b0010;
let Inst{31-21} = 0b01010011011;
@@ -4984,7 +5038,7 @@ def J2_jumprfnewpt : HInst<
(outs),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if (!$Pu4.new) jumpr:t $Rs32",
-J_tc_2early_SLOT2, TypeJ>, Enc_1928953, PredNewRel {
+tc_1f9668cc, TypeJ>, Enc_88d4d9, PredNewRel {
let Inst{7-0} = 0b00000000;
let Inst{13-10} = 0b0110;
let Inst{31-21} = 0b01010011011;
@@ -5004,7 +5058,7 @@ def J2_jumprfpt : HInst<
(outs),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if (!$Pu4) jumpr:t $Rs32",
-J_tc_2early_SLOT2, TypeJ>, Enc_1928953, Requires<[HasV60T]>, PredNewRel {
+tc_a1fb80e1, TypeJ>, Enc_88d4d9, Requires<[HasV60T]>, PredNewRel {
let Inst{7-0} = 0b00000000;
let Inst{13-10} = 0b0100;
let Inst{31-21} = 0b01010011011;
@@ -5023,7 +5077,7 @@ def J2_jumprgtez : HInst<
(outs),
(ins IntRegs:$Rs32, b13_2Imm:$Ii),
"if ($Rs32>=#0) jump:nt $Ii",
-CR_tc_2early_SLOT3, TypeCR>, Enc_12477789 {
+tc_b324366f, TypeCR>, Enc_0fa531 {
let Inst{0-0} = 0b0;
let Inst{12-12} = 0b0;
let Inst{31-22} = 0b0110000101;
@@ -5038,7 +5092,7 @@ def J2_jumprgtezpt : HInst<
(outs),
(ins IntRegs:$Rs32, b13_2Imm:$Ii),
"if ($Rs32>=#0) jump:t $Ii",
-CR_tc_2early_SLOT3, TypeCR>, Enc_12477789 {
+tc_b324366f, TypeCR>, Enc_0fa531 {
let Inst{0-0} = 0b0;
let Inst{12-12} = 0b1;
let Inst{31-22} = 0b0110000101;
@@ -5053,7 +5107,7 @@ def J2_jumprltez : HInst<
(outs),
(ins IntRegs:$Rs32, b13_2Imm:$Ii),
"if ($Rs32<=#0) jump:nt $Ii",
-CR_tc_2early_SLOT3, TypeCR>, Enc_12477789 {
+tc_b324366f, TypeCR>, Enc_0fa531 {
let Inst{0-0} = 0b0;
let Inst{12-12} = 0b0;
let Inst{31-22} = 0b0110000111;
@@ -5068,7 +5122,7 @@ def J2_jumprltezpt : HInst<
(outs),
(ins IntRegs:$Rs32, b13_2Imm:$Ii),
"if ($Rs32<=#0) jump:t $Ii",
-CR_tc_2early_SLOT3, TypeCR>, Enc_12477789 {
+tc_b324366f, TypeCR>, Enc_0fa531 {
let Inst{0-0} = 0b0;
let Inst{12-12} = 0b1;
let Inst{31-22} = 0b0110000111;
@@ -5083,7 +5137,7 @@ def J2_jumprnz : HInst<
(outs),
(ins IntRegs:$Rs32, b13_2Imm:$Ii),
"if ($Rs32==#0) jump:nt $Ii",
-CR_tc_2early_SLOT3, TypeCR>, Enc_12477789 {
+tc_b324366f, TypeCR>, Enc_0fa531 {
let Inst{0-0} = 0b0;
let Inst{12-12} = 0b0;
let Inst{31-22} = 0b0110000110;
@@ -5098,7 +5152,7 @@ def J2_jumprnzpt : HInst<
(outs),
(ins IntRegs:$Rs32, b13_2Imm:$Ii),
"if ($Rs32==#0) jump:t $Ii",
-CR_tc_2early_SLOT3, TypeCR>, Enc_12477789 {
+tc_b324366f, TypeCR>, Enc_0fa531 {
let Inst{0-0} = 0b0;
let Inst{12-12} = 0b1;
let Inst{31-22} = 0b0110000110;
@@ -5113,7 +5167,7 @@ def J2_jumprt : HInst<
(outs),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if ($Pu4) jumpr:nt $Rs32",
-J_tc_2early_SLOT2, TypeJ>, Enc_1928953, PredNewRel {
+tc_07ac815d, TypeJ>, Enc_88d4d9, PredNewRel {
let Inst{7-0} = 0b00000000;
let Inst{13-10} = 0b0000;
let Inst{31-21} = 0b01010011010;
@@ -5131,7 +5185,7 @@ def J2_jumprt_nopred_map : HInst<
(outs),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if ($Pu4) jumpr $Rs32",
-PSEUDO, TypeMAPPING>, Requires<[HasV60T]> {
+tc_07ac815d, TypeMAPPING>, Requires<[HasV60T]> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -5139,7 +5193,7 @@ def J2_jumprtnew : HInst<
(outs),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if ($Pu4.new) jumpr:nt $Rs32",
-J_tc_2early_SLOT2, TypeJ>, Enc_1928953, PredNewRel {
+tc_1f9668cc, TypeJ>, Enc_88d4d9, PredNewRel {
let Inst{7-0} = 0b00000000;
let Inst{13-10} = 0b0010;
let Inst{31-21} = 0b01010011010;
@@ -5158,7 +5212,7 @@ def J2_jumprtnewpt : HInst<
(outs),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if ($Pu4.new) jumpr:t $Rs32",
-J_tc_2early_SLOT2, TypeJ>, Enc_1928953, PredNewRel {
+tc_1f9668cc, TypeJ>, Enc_88d4d9, PredNewRel {
let Inst{7-0} = 0b00000000;
let Inst{13-10} = 0b0110;
let Inst{31-21} = 0b01010011010;
@@ -5177,7 +5231,7 @@ def J2_jumprtpt : HInst<
(outs),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if ($Pu4) jumpr:t $Rs32",
-J_tc_2early_SLOT2, TypeJ>, Enc_1928953, Requires<[HasV60T]>, PredNewRel {
+tc_a1fb80e1, TypeJ>, Enc_88d4d9, Requires<[HasV60T]>, PredNewRel {
let Inst{7-0} = 0b00000000;
let Inst{13-10} = 0b0100;
let Inst{31-21} = 0b01010011010;
@@ -5195,7 +5249,7 @@ def J2_jumprz : HInst<
(outs),
(ins IntRegs:$Rs32, b13_2Imm:$Ii),
"if ($Rs32!=#0) jump:nt $Ii",
-CR_tc_2early_SLOT3, TypeCR>, Enc_12477789 {
+tc_b324366f, TypeCR>, Enc_0fa531 {
let Inst{0-0} = 0b0;
let Inst{12-12} = 0b0;
let Inst{31-22} = 0b0110000100;
@@ -5210,7 +5264,7 @@ def J2_jumprzpt : HInst<
(outs),
(ins IntRegs:$Rs32, b13_2Imm:$Ii),
"if ($Rs32!=#0) jump:t $Ii",
-CR_tc_2early_SLOT3, TypeCR>, Enc_12477789 {
+tc_b324366f, TypeCR>, Enc_0fa531 {
let Inst{0-0} = 0b0;
let Inst{12-12} = 0b1;
let Inst{31-22} = 0b0110000100;
@@ -5225,7 +5279,7 @@ def J2_jumpt : HInst<
(outs),
(ins PredRegs:$Pu4, b30_2Imm:$Ii),
"if ($Pu4) jump:nt $Ii",
-J_tc_2early_CJUMP_UCJUMP_ARCHDEPSLOT, TypeJ>, Enc_14868535, PredNewRel {
+tc_1b834fe7, TypeJ>, Enc_daea09, PredNewRel {
let Inst{0-0} = 0b0;
let Inst{12-10} = 0b000;
let Inst{21-21} = 0b0;
@@ -5247,7 +5301,7 @@ def J2_jumpt_nopred_map : HInst<
(outs),
(ins PredRegs:$Pu4, b15_2Imm:$Ii),
"if ($Pu4) jump $Ii",
-PSEUDO, TypeMAPPING>, Requires<[HasV60T]> {
+tc_1b834fe7, TypeMAPPING>, Requires<[HasV60T]> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -5255,7 +5309,7 @@ def J2_jumptnew : HInst<
(outs),
(ins PredRegs:$Pu4, b30_2Imm:$Ii),
"if ($Pu4.new) jump:nt $Ii",
-J_tc_2early_CJUMP_UCJUMP_ARCHDEPSLOT, TypeJ>, Enc_14868535, PredNewRel {
+tc_537e2013, TypeJ>, Enc_daea09, PredNewRel {
let Inst{0-0} = 0b0;
let Inst{12-10} = 0b010;
let Inst{21-21} = 0b0;
@@ -5278,7 +5332,7 @@ def J2_jumptnewpt : HInst<
(outs),
(ins PredRegs:$Pu4, b30_2Imm:$Ii),
"if ($Pu4.new) jump:t $Ii",
-J_tc_2early_CJUMP_UCJUMP_ARCHDEPSLOT, TypeJ>, Enc_14868535, PredNewRel {
+tc_537e2013, TypeJ>, Enc_daea09, PredNewRel {
let Inst{0-0} = 0b0;
let Inst{12-10} = 0b110;
let Inst{21-21} = 0b0;
@@ -5301,7 +5355,7 @@ def J2_jumptpt : HInst<
(outs),
(ins PredRegs:$Pu4, b30_2Imm:$Ii),
"if ($Pu4) jump:t $Ii",
-J_tc_2early_CJUMP_UCJUMP_ARCHDEPSLOT, TypeJ>, Enc_14868535, Requires<[HasV60T]>, PredNewRel {
+tc_b5bfaa60, TypeJ>, Enc_daea09, Requires<[HasV60T]>, PredNewRel {
let Inst{0-0} = 0b0;
let Inst{12-10} = 0b100;
let Inst{21-21} = 0b0;
@@ -5323,7 +5377,7 @@ def J2_loop0i : HInst<
(outs),
(ins b30_2Imm:$Ii, u10_0Imm:$II),
"loop0($Ii,#$II)",
-CR_tc_3x_SLOT3, TypeCR>, Enc_9939385 {
+tc_1000eb10, TypeCR>, Enc_4dc228 {
let Inst{2-2} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01101001000;
@@ -5338,7 +5392,7 @@ def J2_loop0r : HInst<
(outs),
(ins b30_2Imm:$Ii, IntRegs:$Rs32),
"loop0($Ii,$Rs32)",
-CR_tc_3x_SLOT3, TypeCR>, Enc_5790679 {
+tc_f055fbb6, TypeCR>, Enc_864a5a {
let Inst{2-0} = 0b000;
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
@@ -5354,7 +5408,7 @@ def J2_loop1i : HInst<
(outs),
(ins b30_2Imm:$Ii, u10_0Imm:$II),
"loop1($Ii,#$II)",
-CR_tc_3x_SLOT3, TypeCR>, Enc_9939385 {
+tc_1000eb10, TypeCR>, Enc_4dc228 {
let Inst{2-2} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01101001001;
@@ -5369,7 +5423,7 @@ def J2_loop1r : HInst<
(outs),
(ins b30_2Imm:$Ii, IntRegs:$Rs32),
"loop1($Ii,$Rs32)",
-CR_tc_3x_SLOT3, TypeCR>, Enc_5790679 {
+tc_f055fbb6, TypeCR>, Enc_864a5a {
let Inst{2-0} = 0b000;
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
@@ -5385,7 +5439,7 @@ def J2_pause : HInst<
(outs),
(ins u8_0Imm:$Ii),
"pause(#$Ii)",
-J_tc_2early_SLOT2, TypeJ>, Enc_8732960 {
+tc_b189ad4c, TypeJ>, Enc_a51a9a {
let Inst{1-0} = 0b00;
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
@@ -5396,7 +5450,7 @@ def J2_ploop1si : HInst<
(outs),
(ins b30_2Imm:$Ii, u10_0Imm:$II),
"p3 = sp1loop0($Ii,#$II)",
-CR_tc_2early_SLOT3, TypeCR>, Enc_9939385 {
+tc_feb4974b, TypeCR>, Enc_4dc228 {
let Inst{2-2} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01101001101;
@@ -5412,7 +5466,7 @@ def J2_ploop1sr : HInst<
(outs),
(ins b30_2Imm:$Ii, IntRegs:$Rs32),
"p3 = sp1loop0($Ii,$Rs32)",
-CR_tc_2early_SLOT3, TypeCR>, Enc_5790679 {
+tc_d6a805a8, TypeCR>, Enc_864a5a {
let Inst{2-0} = 0b000;
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
@@ -5429,7 +5483,7 @@ def J2_ploop2si : HInst<
(outs),
(ins b30_2Imm:$Ii, u10_0Imm:$II),
"p3 = sp2loop0($Ii,#$II)",
-CR_tc_2early_SLOT3, TypeCR>, Enc_9939385 {
+tc_feb4974b, TypeCR>, Enc_4dc228 {
let Inst{2-2} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01101001110;
@@ -5445,7 +5499,7 @@ def J2_ploop2sr : HInst<
(outs),
(ins b30_2Imm:$Ii, IntRegs:$Rs32),
"p3 = sp2loop0($Ii,$Rs32)",
-CR_tc_2early_SLOT3, TypeCR>, Enc_5790679 {
+tc_d6a805a8, TypeCR>, Enc_864a5a {
let Inst{2-0} = 0b000;
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
@@ -5462,7 +5516,7 @@ def J2_ploop3si : HInst<
(outs),
(ins b30_2Imm:$Ii, u10_0Imm:$II),
"p3 = sp3loop0($Ii,#$II)",
-CR_tc_2early_SLOT3, TypeCR>, Enc_9939385 {
+tc_feb4974b, TypeCR>, Enc_4dc228 {
let Inst{2-2} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01101001111;
@@ -5478,7 +5532,7 @@ def J2_ploop3sr : HInst<
(outs),
(ins b30_2Imm:$Ii, IntRegs:$Rs32),
"p3 = sp3loop0($Ii,$Rs32)",
-CR_tc_2early_SLOT3, TypeCR>, Enc_5790679 {
+tc_d6a805a8, TypeCR>, Enc_864a5a {
let Inst{2-0} = 0b000;
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
@@ -5495,7 +5549,7 @@ def J2_trap0 : HInst<
(outs),
(ins u8_0Imm:$Ii),
"trap0(#$Ii)",
-J_tc_2early_SLOT2, TypeJ>, Enc_8732960 {
+tc_cbe45117, TypeJ>, Enc_a51a9a {
let Inst{1-0} = 0b00;
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
@@ -5506,7 +5560,7 @@ def J4_cmpeq_f_jumpnv_nt : HInst<
(outs),
(ins IntRegs:$Ns8, IntRegs:$Rt32, b30_2Imm:$Ii),
"if (!cmp.eq($Ns8.new,$Rt32)) jump:nt $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_15140689, PredRel {
+tc_580a779c, TypeNCJ>, Enc_c9a18e, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{19-19} = 0b0;
@@ -5531,7 +5585,7 @@ def J4_cmpeq_f_jumpnv_t : HInst<
(outs),
(ins IntRegs:$Ns8, IntRegs:$Rt32, b30_2Imm:$Ii),
"if (!cmp.eq($Ns8.new,$Rt32)) jump:t $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_15140689, PredRel {
+tc_580a779c, TypeNCJ>, Enc_c9a18e, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{19-19} = 0b0;
@@ -5556,7 +5610,7 @@ def J4_cmpeq_fp0_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, GeneralSubRegs:$Rt16, b30_2Imm:$Ii),
"p0 = cmp.eq($Rs16,$Rt16); if (!p0.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_14264243, PredRel {
+tc_92d1833c, TypeCJ>, Enc_6a5972, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-12} = 0b00;
let Inst{31-22} = 0b0001010001;
@@ -5579,7 +5633,7 @@ def J4_cmpeq_fp0_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, GeneralSubRegs:$Rt16, b30_2Imm:$Ii),
"p0 = cmp.eq($Rs16,$Rt16); if (!p0.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_14264243, PredRel {
+tc_92d1833c, TypeCJ>, Enc_6a5972, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-12} = 0b10;
let Inst{31-22} = 0b0001010001;
@@ -5602,7 +5656,7 @@ def J4_cmpeq_fp1_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, GeneralSubRegs:$Rt16, b30_2Imm:$Ii),
"p1 = cmp.eq($Rs16,$Rt16); if (!p1.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_14264243, PredRel {
+tc_92d1833c, TypeCJ>, Enc_6a5972, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-12} = 0b01;
let Inst{31-22} = 0b0001010001;
@@ -5625,7 +5679,7 @@ def J4_cmpeq_fp1_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, GeneralSubRegs:$Rt16, b30_2Imm:$Ii),
"p1 = cmp.eq($Rs16,$Rt16); if (!p1.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_14264243, PredRel {
+tc_92d1833c, TypeCJ>, Enc_6a5972, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-12} = 0b11;
let Inst{31-22} = 0b0001010001;
@@ -5648,7 +5702,7 @@ def J4_cmpeq_t_jumpnv_nt : HInst<
(outs),
(ins IntRegs:$Ns8, IntRegs:$Rt32, b30_2Imm:$Ii),
"if (cmp.eq($Ns8.new,$Rt32)) jump:nt $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_15140689, PredRel {
+tc_580a779c, TypeNCJ>, Enc_c9a18e, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{19-19} = 0b0;
@@ -5672,7 +5726,7 @@ def J4_cmpeq_t_jumpnv_t : HInst<
(outs),
(ins IntRegs:$Ns8, IntRegs:$Rt32, b30_2Imm:$Ii),
"if (cmp.eq($Ns8.new,$Rt32)) jump:t $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_15140689, PredRel {
+tc_580a779c, TypeNCJ>, Enc_c9a18e, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{19-19} = 0b0;
@@ -5696,7 +5750,7 @@ def J4_cmpeq_tp0_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, GeneralSubRegs:$Rt16, b30_2Imm:$Ii),
"p0 = cmp.eq($Rs16,$Rt16); if (p0.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_14264243, PredRel {
+tc_92d1833c, TypeCJ>, Enc_6a5972, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-12} = 0b00;
let Inst{31-22} = 0b0001010000;
@@ -5718,7 +5772,7 @@ def J4_cmpeq_tp0_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, GeneralSubRegs:$Rt16, b30_2Imm:$Ii),
"p0 = cmp.eq($Rs16,$Rt16); if (p0.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_14264243, PredRel {
+tc_92d1833c, TypeCJ>, Enc_6a5972, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-12} = 0b10;
let Inst{31-22} = 0b0001010000;
@@ -5740,7 +5794,7 @@ def J4_cmpeq_tp1_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, GeneralSubRegs:$Rt16, b30_2Imm:$Ii),
"p1 = cmp.eq($Rs16,$Rt16); if (p1.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_14264243, PredRel {
+tc_92d1833c, TypeCJ>, Enc_6a5972, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-12} = 0b01;
let Inst{31-22} = 0b0001010000;
@@ -5762,7 +5816,7 @@ def J4_cmpeq_tp1_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, GeneralSubRegs:$Rt16, b30_2Imm:$Ii),
"p1 = cmp.eq($Rs16,$Rt16); if (p1.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_14264243, PredRel {
+tc_92d1833c, TypeCJ>, Enc_6a5972, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-12} = 0b11;
let Inst{31-22} = 0b0001010000;
@@ -5784,7 +5838,7 @@ def J4_cmpeqi_f_jumpnv_nt : HInst<
(outs),
(ins IntRegs:$Ns8, u5_0Imm:$II, b30_2Imm:$Ii),
"if (!cmp.eq($Ns8.new,#$II)) jump:nt $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_4397470, PredRel {
+tc_09faec3b, TypeNCJ>, Enc_eafd18, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{19-19} = 0b0;
@@ -5809,7 +5863,7 @@ def J4_cmpeqi_f_jumpnv_t : HInst<
(outs),
(ins IntRegs:$Ns8, u5_0Imm:$II, b30_2Imm:$Ii),
"if (!cmp.eq($Ns8.new,#$II)) jump:t $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_4397470, PredRel {
+tc_09faec3b, TypeNCJ>, Enc_eafd18, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{19-19} = 0b0;
@@ -5834,7 +5888,7 @@ def J4_cmpeqi_fp0_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u5_0Imm:$II, b30_2Imm:$Ii),
"p0 = cmp.eq($Rs16,#$II); if (!p0.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_7305764, PredRel {
+tc_d108a090, TypeCJ>, Enc_14d27a, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-22} = 0b0001000001;
@@ -5857,7 +5911,7 @@ def J4_cmpeqi_fp0_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u5_0Imm:$II, b30_2Imm:$Ii),
"p0 = cmp.eq($Rs16,#$II); if (!p0.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_7305764, PredRel {
+tc_d108a090, TypeCJ>, Enc_14d27a, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{31-22} = 0b0001000001;
@@ -5880,7 +5934,7 @@ def J4_cmpeqi_fp1_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u5_0Imm:$II, b30_2Imm:$Ii),
"p1 = cmp.eq($Rs16,#$II); if (!p1.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_7305764, PredRel {
+tc_d108a090, TypeCJ>, Enc_14d27a, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-22} = 0b0001001001;
@@ -5903,7 +5957,7 @@ def J4_cmpeqi_fp1_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u5_0Imm:$II, b30_2Imm:$Ii),
"p1 = cmp.eq($Rs16,#$II); if (!p1.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_7305764, PredRel {
+tc_d108a090, TypeCJ>, Enc_14d27a, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{31-22} = 0b0001001001;
@@ -5926,7 +5980,7 @@ def J4_cmpeqi_t_jumpnv_nt : HInst<
(outs),
(ins IntRegs:$Ns8, u5_0Imm:$II, b30_2Imm:$Ii),
"if (cmp.eq($Ns8.new,#$II)) jump:nt $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_4397470, PredRel {
+tc_09faec3b, TypeNCJ>, Enc_eafd18, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{19-19} = 0b0;
@@ -5950,7 +6004,7 @@ def J4_cmpeqi_t_jumpnv_t : HInst<
(outs),
(ins IntRegs:$Ns8, u5_0Imm:$II, b30_2Imm:$Ii),
"if (cmp.eq($Ns8.new,#$II)) jump:t $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_4397470, PredRel {
+tc_09faec3b, TypeNCJ>, Enc_eafd18, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{19-19} = 0b0;
@@ -5974,7 +6028,7 @@ def J4_cmpeqi_tp0_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u5_0Imm:$II, b30_2Imm:$Ii),
"p0 = cmp.eq($Rs16,#$II); if (p0.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_7305764, PredRel {
+tc_d108a090, TypeCJ>, Enc_14d27a, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-22} = 0b0001000000;
@@ -5996,7 +6050,7 @@ def J4_cmpeqi_tp0_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u5_0Imm:$II, b30_2Imm:$Ii),
"p0 = cmp.eq($Rs16,#$II); if (p0.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_7305764, PredRel {
+tc_d108a090, TypeCJ>, Enc_14d27a, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{31-22} = 0b0001000000;
@@ -6018,7 +6072,7 @@ def J4_cmpeqi_tp1_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u5_0Imm:$II, b30_2Imm:$Ii),
"p1 = cmp.eq($Rs16,#$II); if (p1.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_7305764, PredRel {
+tc_d108a090, TypeCJ>, Enc_14d27a, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-22} = 0b0001001000;
@@ -6040,7 +6094,7 @@ def J4_cmpeqi_tp1_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u5_0Imm:$II, b30_2Imm:$Ii),
"p1 = cmp.eq($Rs16,#$II); if (p1.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_7305764, PredRel {
+tc_d108a090, TypeCJ>, Enc_14d27a, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{31-22} = 0b0001001000;
@@ -6062,7 +6116,7 @@ def J4_cmpeqn1_f_jumpnv_nt : HInst<
(outs),
(ins IntRegs:$Ns8, n1Const:$n1, b30_2Imm:$Ii),
"if (!cmp.eq($Ns8.new,#$n1)) jump:nt $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_4359901, PredRel {
+tc_09faec3b, TypeNCJ>, Enc_e90a15, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b000000;
let Inst{19-19} = 0b0;
@@ -6087,7 +6141,7 @@ def J4_cmpeqn1_f_jumpnv_t : HInst<
(outs),
(ins IntRegs:$Ns8, n1Const:$n1, b30_2Imm:$Ii),
"if (!cmp.eq($Ns8.new,#$n1)) jump:t $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_8612939, PredRel {
+tc_09faec3b, TypeNCJ>, Enc_5a18b3, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b100000;
let Inst{19-19} = 0b0;
@@ -6112,7 +6166,7 @@ def J4_cmpeqn1_fp0_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, n1Const:$n1, b30_2Imm:$Ii),
"p0 = cmp.eq($Rs16,#$n1); if (!p0.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_844699, PredRel {
+tc_d108a090, TypeCJ>, Enc_1de724, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b000000;
let Inst{31-22} = 0b0001000111;
@@ -6135,7 +6189,7 @@ def J4_cmpeqn1_fp0_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, n1Const:$n1, b30_2Imm:$Ii),
"p0 = cmp.eq($Rs16,#$n1); if (!p0.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_5338033, PredRel {
+tc_d108a090, TypeCJ>, Enc_14640c, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b100000;
let Inst{31-22} = 0b0001000111;
@@ -6158,7 +6212,7 @@ def J4_cmpeqn1_fp1_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, n1Const:$n1, b30_2Imm:$Ii),
"p1 = cmp.eq($Rs16,#$n1); if (!p1.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_14150875, PredRel {
+tc_d108a090, TypeCJ>, Enc_668704, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b000000;
let Inst{31-22} = 0b0001001111;
@@ -6181,7 +6235,7 @@ def J4_cmpeqn1_fp1_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, n1Const:$n1, b30_2Imm:$Ii),
"p1 = cmp.eq($Rs16,#$n1); if (!p1.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_15450971, PredRel {
+tc_d108a090, TypeCJ>, Enc_800e04, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b100000;
let Inst{31-22} = 0b0001001111;
@@ -6204,7 +6258,7 @@ def J4_cmpeqn1_t_jumpnv_nt : HInst<
(outs),
(ins IntRegs:$Ns8, n1Const:$n1, b30_2Imm:$Ii),
"if (cmp.eq($Ns8.new,#$n1)) jump:nt $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_14998517, PredRel {
+tc_09faec3b, TypeNCJ>, Enc_4aca3a, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b000000;
let Inst{19-19} = 0b0;
@@ -6228,7 +6282,7 @@ def J4_cmpeqn1_t_jumpnv_t : HInst<
(outs),
(ins IntRegs:$Ns8, n1Const:$n1, b30_2Imm:$Ii),
"if (cmp.eq($Ns8.new,#$n1)) jump:t $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_11544269, PredRel {
+tc_09faec3b, TypeNCJ>, Enc_f7ea77, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b100000;
let Inst{19-19} = 0b0;
@@ -6252,7 +6306,7 @@ def J4_cmpeqn1_tp0_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, n1Const:$n1, b30_2Imm:$Ii),
"p0 = cmp.eq($Rs16,#$n1); if (p0.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_5401217, PredRel {
+tc_d108a090, TypeCJ>, Enc_405228, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b000000;
let Inst{31-22} = 0b0001000110;
@@ -6274,7 +6328,7 @@ def J4_cmpeqn1_tp0_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, n1Const:$n1, b30_2Imm:$Ii),
"p0 = cmp.eq($Rs16,#$n1); if (p0.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_12419313, PredRel {
+tc_d108a090, TypeCJ>, Enc_3a2484, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b100000;
let Inst{31-22} = 0b0001000110;
@@ -6296,7 +6350,7 @@ def J4_cmpeqn1_tp1_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, n1Const:$n1, b30_2Imm:$Ii),
"p1 = cmp.eq($Rs16,#$n1); if (p1.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_4684887, PredRel {
+tc_d108a090, TypeCJ>, Enc_736575, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b000000;
let Inst{31-22} = 0b0001001110;
@@ -6318,7 +6372,7 @@ def J4_cmpeqn1_tp1_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, n1Const:$n1, b30_2Imm:$Ii),
"p1 = cmp.eq($Rs16,#$n1); if (p1.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_220949, PredRel {
+tc_d108a090, TypeCJ>, Enc_8e583a, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b100000;
let Inst{31-22} = 0b0001001110;
@@ -6340,7 +6394,7 @@ def J4_cmpgt_f_jumpnv_nt : HInst<
(outs),
(ins IntRegs:$Ns8, IntRegs:$Rt32, b30_2Imm:$Ii),
"if (!cmp.gt($Ns8.new,$Rt32)) jump:nt $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_15140689, PredRel {
+tc_580a779c, TypeNCJ>, Enc_c9a18e, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{19-19} = 0b0;
@@ -6365,7 +6419,7 @@ def J4_cmpgt_f_jumpnv_t : HInst<
(outs),
(ins IntRegs:$Ns8, IntRegs:$Rt32, b30_2Imm:$Ii),
"if (!cmp.gt($Ns8.new,$Rt32)) jump:t $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_15140689, PredRel {
+tc_580a779c, TypeNCJ>, Enc_c9a18e, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{19-19} = 0b0;
@@ -6390,7 +6444,7 @@ def J4_cmpgt_fp0_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, GeneralSubRegs:$Rt16, b30_2Imm:$Ii),
"p0 = cmp.gt($Rs16,$Rt16); if (!p0.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_14264243, PredRel {
+tc_92d1833c, TypeCJ>, Enc_6a5972, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-12} = 0b00;
let Inst{31-22} = 0b0001010011;
@@ -6413,7 +6467,7 @@ def J4_cmpgt_fp0_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, GeneralSubRegs:$Rt16, b30_2Imm:$Ii),
"p0 = cmp.gt($Rs16,$Rt16); if (!p0.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_14264243, PredRel {
+tc_92d1833c, TypeCJ>, Enc_6a5972, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-12} = 0b10;
let Inst{31-22} = 0b0001010011;
@@ -6436,7 +6490,7 @@ def J4_cmpgt_fp1_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, GeneralSubRegs:$Rt16, b30_2Imm:$Ii),
"p1 = cmp.gt($Rs16,$Rt16); if (!p1.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_14264243, PredRel {
+tc_92d1833c, TypeCJ>, Enc_6a5972, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-12} = 0b01;
let Inst{31-22} = 0b0001010011;
@@ -6459,7 +6513,7 @@ def J4_cmpgt_fp1_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, GeneralSubRegs:$Rt16, b30_2Imm:$Ii),
"p1 = cmp.gt($Rs16,$Rt16); if (!p1.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_14264243, PredRel {
+tc_92d1833c, TypeCJ>, Enc_6a5972, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-12} = 0b11;
let Inst{31-22} = 0b0001010011;
@@ -6482,7 +6536,7 @@ def J4_cmpgt_t_jumpnv_nt : HInst<
(outs),
(ins IntRegs:$Ns8, IntRegs:$Rt32, b30_2Imm:$Ii),
"if (cmp.gt($Ns8.new,$Rt32)) jump:nt $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_15140689, PredRel {
+tc_580a779c, TypeNCJ>, Enc_c9a18e, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{19-19} = 0b0;
@@ -6506,7 +6560,7 @@ def J4_cmpgt_t_jumpnv_t : HInst<
(outs),
(ins IntRegs:$Ns8, IntRegs:$Rt32, b30_2Imm:$Ii),
"if (cmp.gt($Ns8.new,$Rt32)) jump:t $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_15140689, PredRel {
+tc_580a779c, TypeNCJ>, Enc_c9a18e, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{19-19} = 0b0;
@@ -6530,7 +6584,7 @@ def J4_cmpgt_tp0_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, GeneralSubRegs:$Rt16, b30_2Imm:$Ii),
"p0 = cmp.gt($Rs16,$Rt16); if (p0.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_14264243, PredRel {
+tc_92d1833c, TypeCJ>, Enc_6a5972, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-12} = 0b00;
let Inst{31-22} = 0b0001010010;
@@ -6552,7 +6606,7 @@ def J4_cmpgt_tp0_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, GeneralSubRegs:$Rt16, b30_2Imm:$Ii),
"p0 = cmp.gt($Rs16,$Rt16); if (p0.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_14264243, PredRel {
+tc_92d1833c, TypeCJ>, Enc_6a5972, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-12} = 0b10;
let Inst{31-22} = 0b0001010010;
@@ -6574,7 +6628,7 @@ def J4_cmpgt_tp1_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, GeneralSubRegs:$Rt16, b30_2Imm:$Ii),
"p1 = cmp.gt($Rs16,$Rt16); if (p1.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_14264243, PredRel {
+tc_92d1833c, TypeCJ>, Enc_6a5972, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-12} = 0b01;
let Inst{31-22} = 0b0001010010;
@@ -6596,7 +6650,7 @@ def J4_cmpgt_tp1_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, GeneralSubRegs:$Rt16, b30_2Imm:$Ii),
"p1 = cmp.gt($Rs16,$Rt16); if (p1.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_14264243, PredRel {
+tc_92d1833c, TypeCJ>, Enc_6a5972, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-12} = 0b11;
let Inst{31-22} = 0b0001010010;
@@ -6618,7 +6672,7 @@ def J4_cmpgti_f_jumpnv_nt : HInst<
(outs),
(ins IntRegs:$Ns8, u5_0Imm:$II, b30_2Imm:$Ii),
"if (!cmp.gt($Ns8.new,#$II)) jump:nt $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_4397470, PredRel {
+tc_09faec3b, TypeNCJ>, Enc_eafd18, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{19-19} = 0b0;
@@ -6643,7 +6697,7 @@ def J4_cmpgti_f_jumpnv_t : HInst<
(outs),
(ins IntRegs:$Ns8, u5_0Imm:$II, b30_2Imm:$Ii),
"if (!cmp.gt($Ns8.new,#$II)) jump:t $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_4397470, PredRel {
+tc_09faec3b, TypeNCJ>, Enc_eafd18, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{19-19} = 0b0;
@@ -6668,7 +6722,7 @@ def J4_cmpgti_fp0_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u5_0Imm:$II, b30_2Imm:$Ii),
"p0 = cmp.gt($Rs16,#$II); if (!p0.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_7305764, PredRel {
+tc_d108a090, TypeCJ>, Enc_14d27a, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-22} = 0b0001000011;
@@ -6691,7 +6745,7 @@ def J4_cmpgti_fp0_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u5_0Imm:$II, b30_2Imm:$Ii),
"p0 = cmp.gt($Rs16,#$II); if (!p0.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_7305764, PredRel {
+tc_d108a090, TypeCJ>, Enc_14d27a, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{31-22} = 0b0001000011;
@@ -6714,7 +6768,7 @@ def J4_cmpgti_fp1_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u5_0Imm:$II, b30_2Imm:$Ii),
"p1 = cmp.gt($Rs16,#$II); if (!p1.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_7305764, PredRel {
+tc_d108a090, TypeCJ>, Enc_14d27a, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-22} = 0b0001001011;
@@ -6737,7 +6791,7 @@ def J4_cmpgti_fp1_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u5_0Imm:$II, b30_2Imm:$Ii),
"p1 = cmp.gt($Rs16,#$II); if (!p1.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_7305764, PredRel {
+tc_d108a090, TypeCJ>, Enc_14d27a, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{31-22} = 0b0001001011;
@@ -6760,7 +6814,7 @@ def J4_cmpgti_t_jumpnv_nt : HInst<
(outs),
(ins IntRegs:$Ns8, u5_0Imm:$II, b30_2Imm:$Ii),
"if (cmp.gt($Ns8.new,#$II)) jump:nt $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_4397470, PredRel {
+tc_09faec3b, TypeNCJ>, Enc_eafd18, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{19-19} = 0b0;
@@ -6784,7 +6838,7 @@ def J4_cmpgti_t_jumpnv_t : HInst<
(outs),
(ins IntRegs:$Ns8, u5_0Imm:$II, b30_2Imm:$Ii),
"if (cmp.gt($Ns8.new,#$II)) jump:t $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_4397470, PredRel {
+tc_09faec3b, TypeNCJ>, Enc_eafd18, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{19-19} = 0b0;
@@ -6808,7 +6862,7 @@ def J4_cmpgti_tp0_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u5_0Imm:$II, b30_2Imm:$Ii),
"p0 = cmp.gt($Rs16,#$II); if (p0.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_7305764, PredRel {
+tc_d108a090, TypeCJ>, Enc_14d27a, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-22} = 0b0001000010;
@@ -6830,7 +6884,7 @@ def J4_cmpgti_tp0_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u5_0Imm:$II, b30_2Imm:$Ii),
"p0 = cmp.gt($Rs16,#$II); if (p0.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_7305764, PredRel {
+tc_d108a090, TypeCJ>, Enc_14d27a, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{31-22} = 0b0001000010;
@@ -6852,7 +6906,7 @@ def J4_cmpgti_tp1_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u5_0Imm:$II, b30_2Imm:$Ii),
"p1 = cmp.gt($Rs16,#$II); if (p1.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_7305764, PredRel {
+tc_d108a090, TypeCJ>, Enc_14d27a, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-22} = 0b0001001010;
@@ -6874,7 +6928,7 @@ def J4_cmpgti_tp1_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u5_0Imm:$II, b30_2Imm:$Ii),
"p1 = cmp.gt($Rs16,#$II); if (p1.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_7305764, PredRel {
+tc_d108a090, TypeCJ>, Enc_14d27a, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{31-22} = 0b0001001010;
@@ -6896,7 +6950,7 @@ def J4_cmpgtn1_f_jumpnv_nt : HInst<
(outs),
(ins IntRegs:$Ns8, n1Const:$n1, b30_2Imm:$Ii),
"if (!cmp.gt($Ns8.new,#$n1)) jump:nt $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_8674673, PredRel {
+tc_09faec3b, TypeNCJ>, Enc_3694bd, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b000000;
let Inst{19-19} = 0b0;
@@ -6921,7 +6975,7 @@ def J4_cmpgtn1_f_jumpnv_t : HInst<
(outs),
(ins IntRegs:$Ns8, n1Const:$n1, b30_2Imm:$Ii),
"if (!cmp.gt($Ns8.new,#$n1)) jump:t $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_15763937, PredRel {
+tc_09faec3b, TypeNCJ>, Enc_a6853f, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b100000;
let Inst{19-19} = 0b0;
@@ -6946,7 +7000,7 @@ def J4_cmpgtn1_fp0_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, n1Const:$n1, b30_2Imm:$Ii),
"p0 = cmp.gt($Rs16,#$n1); if (!p0.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_5915771, PredRel {
+tc_d108a090, TypeCJ>, Enc_a42857, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b000001;
let Inst{31-22} = 0b0001000111;
@@ -6969,7 +7023,7 @@ def J4_cmpgtn1_fp0_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, n1Const:$n1, b30_2Imm:$Ii),
"p0 = cmp.gt($Rs16,#$n1); if (!p0.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_7315939, PredRel {
+tc_d108a090, TypeCJ>, Enc_f6fe0b, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b100001;
let Inst{31-22} = 0b0001000111;
@@ -6992,7 +7046,7 @@ def J4_cmpgtn1_fp1_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, n1Const:$n1, b30_2Imm:$Ii),
"p1 = cmp.gt($Rs16,#$n1); if (!p1.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_7785569, PredRel {
+tc_d108a090, TypeCJ>, Enc_3e3989, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b000001;
let Inst{31-22} = 0b0001001111;
@@ -7015,7 +7069,7 @@ def J4_cmpgtn1_fp1_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, n1Const:$n1, b30_2Imm:$Ii),
"p1 = cmp.gt($Rs16,#$n1); if (!p1.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_10968391, PredRel {
+tc_d108a090, TypeCJ>, Enc_b909d2, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b100001;
let Inst{31-22} = 0b0001001111;
@@ -7038,7 +7092,7 @@ def J4_cmpgtn1_t_jumpnv_nt : HInst<
(outs),
(ins IntRegs:$Ns8, n1Const:$n1, b30_2Imm:$Ii),
"if (cmp.gt($Ns8.new,#$n1)) jump:nt $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_364753, PredRel {
+tc_09faec3b, TypeNCJ>, Enc_f82302, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b000000;
let Inst{19-19} = 0b0;
@@ -7062,7 +7116,7 @@ def J4_cmpgtn1_t_jumpnv_t : HInst<
(outs),
(ins IntRegs:$Ns8, n1Const:$n1, b30_2Imm:$Ii),
"if (cmp.gt($Ns8.new,#$n1)) jump:t $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_8479583, PredRel {
+tc_09faec3b, TypeNCJ>, Enc_6413b6, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b100000;
let Inst{19-19} = 0b0;
@@ -7086,7 +7140,7 @@ def J4_cmpgtn1_tp0_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, n1Const:$n1, b30_2Imm:$Ii),
"p0 = cmp.gt($Rs16,#$n1); if (p0.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_2428539, PredRel {
+tc_d108a090, TypeCJ>, Enc_b78edd, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b000001;
let Inst{31-22} = 0b0001000110;
@@ -7108,7 +7162,7 @@ def J4_cmpgtn1_tp0_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, n1Const:$n1, b30_2Imm:$Ii),
"p0 = cmp.gt($Rs16,#$n1); if (p0.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_8919369, PredRel {
+tc_d108a090, TypeCJ>, Enc_041d7b, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b100001;
let Inst{31-22} = 0b0001000110;
@@ -7130,7 +7184,7 @@ def J4_cmpgtn1_tp1_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, n1Const:$n1, b30_2Imm:$Ii),
"p1 = cmp.gt($Rs16,#$n1); if (p1.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_8577055, PredRel {
+tc_d108a090, TypeCJ>, Enc_b1e1fb, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b000001;
let Inst{31-22} = 0b0001001110;
@@ -7152,7 +7206,7 @@ def J4_cmpgtn1_tp1_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, n1Const:$n1, b30_2Imm:$Ii),
"p1 = cmp.gt($Rs16,#$n1); if (p1.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_14530015, PredRel {
+tc_d108a090, TypeCJ>, Enc_178717, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b100001;
let Inst{31-22} = 0b0001001110;
@@ -7174,7 +7228,7 @@ def J4_cmpgtu_f_jumpnv_nt : HInst<
(outs),
(ins IntRegs:$Ns8, IntRegs:$Rt32, b30_2Imm:$Ii),
"if (!cmp.gtu($Ns8.new,$Rt32)) jump:nt $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_15140689, PredRel {
+tc_580a779c, TypeNCJ>, Enc_c9a18e, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{19-19} = 0b0;
@@ -7199,7 +7253,7 @@ def J4_cmpgtu_f_jumpnv_t : HInst<
(outs),
(ins IntRegs:$Ns8, IntRegs:$Rt32, b30_2Imm:$Ii),
"if (!cmp.gtu($Ns8.new,$Rt32)) jump:t $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_15140689, PredRel {
+tc_580a779c, TypeNCJ>, Enc_c9a18e, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{19-19} = 0b0;
@@ -7224,7 +7278,7 @@ def J4_cmpgtu_fp0_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, GeneralSubRegs:$Rt16, b30_2Imm:$Ii),
"p0 = cmp.gtu($Rs16,$Rt16); if (!p0.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_14264243, PredRel {
+tc_92d1833c, TypeCJ>, Enc_6a5972, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-12} = 0b00;
let Inst{31-22} = 0b0001010101;
@@ -7247,7 +7301,7 @@ def J4_cmpgtu_fp0_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, GeneralSubRegs:$Rt16, b30_2Imm:$Ii),
"p0 = cmp.gtu($Rs16,$Rt16); if (!p0.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_14264243, PredRel {
+tc_92d1833c, TypeCJ>, Enc_6a5972, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-12} = 0b10;
let Inst{31-22} = 0b0001010101;
@@ -7270,7 +7324,7 @@ def J4_cmpgtu_fp1_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, GeneralSubRegs:$Rt16, b30_2Imm:$Ii),
"p1 = cmp.gtu($Rs16,$Rt16); if (!p1.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_14264243, PredRel {
+tc_92d1833c, TypeCJ>, Enc_6a5972, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-12} = 0b01;
let Inst{31-22} = 0b0001010101;
@@ -7293,7 +7347,7 @@ def J4_cmpgtu_fp1_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, GeneralSubRegs:$Rt16, b30_2Imm:$Ii),
"p1 = cmp.gtu($Rs16,$Rt16); if (!p1.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_14264243, PredRel {
+tc_92d1833c, TypeCJ>, Enc_6a5972, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-12} = 0b11;
let Inst{31-22} = 0b0001010101;
@@ -7316,7 +7370,7 @@ def J4_cmpgtu_t_jumpnv_nt : HInst<
(outs),
(ins IntRegs:$Ns8, IntRegs:$Rt32, b30_2Imm:$Ii),
"if (cmp.gtu($Ns8.new,$Rt32)) jump:nt $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_15140689, PredRel {
+tc_580a779c, TypeNCJ>, Enc_c9a18e, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{19-19} = 0b0;
@@ -7340,7 +7394,7 @@ def J4_cmpgtu_t_jumpnv_t : HInst<
(outs),
(ins IntRegs:$Ns8, IntRegs:$Rt32, b30_2Imm:$Ii),
"if (cmp.gtu($Ns8.new,$Rt32)) jump:t $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_15140689, PredRel {
+tc_580a779c, TypeNCJ>, Enc_c9a18e, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{19-19} = 0b0;
@@ -7364,7 +7418,7 @@ def J4_cmpgtu_tp0_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, GeneralSubRegs:$Rt16, b30_2Imm:$Ii),
"p0 = cmp.gtu($Rs16,$Rt16); if (p0.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_14264243, PredRel {
+tc_92d1833c, TypeCJ>, Enc_6a5972, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-12} = 0b00;
let Inst{31-22} = 0b0001010100;
@@ -7386,7 +7440,7 @@ def J4_cmpgtu_tp0_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, GeneralSubRegs:$Rt16, b30_2Imm:$Ii),
"p0 = cmp.gtu($Rs16,$Rt16); if (p0.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_14264243, PredRel {
+tc_92d1833c, TypeCJ>, Enc_6a5972, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-12} = 0b10;
let Inst{31-22} = 0b0001010100;
@@ -7408,7 +7462,7 @@ def J4_cmpgtu_tp1_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, GeneralSubRegs:$Rt16, b30_2Imm:$Ii),
"p1 = cmp.gtu($Rs16,$Rt16); if (p1.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_14264243, PredRel {
+tc_92d1833c, TypeCJ>, Enc_6a5972, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-12} = 0b01;
let Inst{31-22} = 0b0001010100;
@@ -7430,7 +7484,7 @@ def J4_cmpgtu_tp1_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, GeneralSubRegs:$Rt16, b30_2Imm:$Ii),
"p1 = cmp.gtu($Rs16,$Rt16); if (p1.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_14264243, PredRel {
+tc_92d1833c, TypeCJ>, Enc_6a5972, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-12} = 0b11;
let Inst{31-22} = 0b0001010100;
@@ -7452,7 +7506,7 @@ def J4_cmpgtui_f_jumpnv_nt : HInst<
(outs),
(ins IntRegs:$Ns8, u5_0Imm:$II, b30_2Imm:$Ii),
"if (!cmp.gtu($Ns8.new,#$II)) jump:nt $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_4397470, PredRel {
+tc_09faec3b, TypeNCJ>, Enc_eafd18, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{19-19} = 0b0;
@@ -7477,7 +7531,7 @@ def J4_cmpgtui_f_jumpnv_t : HInst<
(outs),
(ins IntRegs:$Ns8, u5_0Imm:$II, b30_2Imm:$Ii),
"if (!cmp.gtu($Ns8.new,#$II)) jump:t $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_4397470, PredRel {
+tc_09faec3b, TypeNCJ>, Enc_eafd18, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{19-19} = 0b0;
@@ -7502,7 +7556,7 @@ def J4_cmpgtui_fp0_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u5_0Imm:$II, b30_2Imm:$Ii),
"p0 = cmp.gtu($Rs16,#$II); if (!p0.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_7305764, PredRel {
+tc_d108a090, TypeCJ>, Enc_14d27a, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-22} = 0b0001000101;
@@ -7525,7 +7579,7 @@ def J4_cmpgtui_fp0_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u5_0Imm:$II, b30_2Imm:$Ii),
"p0 = cmp.gtu($Rs16,#$II); if (!p0.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_7305764, PredRel {
+tc_d108a090, TypeCJ>, Enc_14d27a, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{31-22} = 0b0001000101;
@@ -7548,7 +7602,7 @@ def J4_cmpgtui_fp1_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u5_0Imm:$II, b30_2Imm:$Ii),
"p1 = cmp.gtu($Rs16,#$II); if (!p1.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_7305764, PredRel {
+tc_d108a090, TypeCJ>, Enc_14d27a, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-22} = 0b0001001101;
@@ -7571,7 +7625,7 @@ def J4_cmpgtui_fp1_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u5_0Imm:$II, b30_2Imm:$Ii),
"p1 = cmp.gtu($Rs16,#$II); if (!p1.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_7305764, PredRel {
+tc_d108a090, TypeCJ>, Enc_14d27a, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{31-22} = 0b0001001101;
@@ -7594,7 +7648,7 @@ def J4_cmpgtui_t_jumpnv_nt : HInst<
(outs),
(ins IntRegs:$Ns8, u5_0Imm:$II, b30_2Imm:$Ii),
"if (cmp.gtu($Ns8.new,#$II)) jump:nt $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_4397470, PredRel {
+tc_09faec3b, TypeNCJ>, Enc_eafd18, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{19-19} = 0b0;
@@ -7618,7 +7672,7 @@ def J4_cmpgtui_t_jumpnv_t : HInst<
(outs),
(ins IntRegs:$Ns8, u5_0Imm:$II, b30_2Imm:$Ii),
"if (cmp.gtu($Ns8.new,#$II)) jump:t $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_4397470, PredRel {
+tc_09faec3b, TypeNCJ>, Enc_eafd18, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{19-19} = 0b0;
@@ -7642,7 +7696,7 @@ def J4_cmpgtui_tp0_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u5_0Imm:$II, b30_2Imm:$Ii),
"p0 = cmp.gtu($Rs16,#$II); if (p0.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_7305764, PredRel {
+tc_d108a090, TypeCJ>, Enc_14d27a, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-22} = 0b0001000100;
@@ -7664,7 +7718,7 @@ def J4_cmpgtui_tp0_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u5_0Imm:$II, b30_2Imm:$Ii),
"p0 = cmp.gtu($Rs16,#$II); if (p0.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_7305764, PredRel {
+tc_d108a090, TypeCJ>, Enc_14d27a, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{31-22} = 0b0001000100;
@@ -7686,7 +7740,7 @@ def J4_cmpgtui_tp1_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u5_0Imm:$II, b30_2Imm:$Ii),
"p1 = cmp.gtu($Rs16,#$II); if (p1.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_7305764, PredRel {
+tc_d108a090, TypeCJ>, Enc_14d27a, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-22} = 0b0001001100;
@@ -7708,7 +7762,7 @@ def J4_cmpgtui_tp1_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u5_0Imm:$II, b30_2Imm:$Ii),
"p1 = cmp.gtu($Rs16,#$II); if (p1.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_7305764, PredRel {
+tc_d108a090, TypeCJ>, Enc_14d27a, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{31-22} = 0b0001001100;
@@ -7730,7 +7784,7 @@ def J4_cmplt_f_jumpnv_nt : HInst<
(outs),
(ins IntRegs:$Rt32, IntRegs:$Ns8, b30_2Imm:$Ii),
"if (!cmp.gt($Rt32,$Ns8.new)) jump:nt $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_6730375, PredRel {
+tc_3e61d314, TypeNCJ>, Enc_5de85f, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{19-19} = 0b0;
@@ -7755,7 +7809,7 @@ def J4_cmplt_f_jumpnv_t : HInst<
(outs),
(ins IntRegs:$Rt32, IntRegs:$Ns8, b30_2Imm:$Ii),
"if (!cmp.gt($Rt32,$Ns8.new)) jump:t $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_6730375, PredRel {
+tc_3e61d314, TypeNCJ>, Enc_5de85f, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{19-19} = 0b0;
@@ -7780,7 +7834,7 @@ def J4_cmplt_t_jumpnv_nt : HInst<
(outs),
(ins IntRegs:$Rt32, IntRegs:$Ns8, b30_2Imm:$Ii),
"if (cmp.gt($Rt32,$Ns8.new)) jump:nt $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_6730375, PredRel {
+tc_3e61d314, TypeNCJ>, Enc_5de85f, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{19-19} = 0b0;
@@ -7804,7 +7858,7 @@ def J4_cmplt_t_jumpnv_t : HInst<
(outs),
(ins IntRegs:$Rt32, IntRegs:$Ns8, b30_2Imm:$Ii),
"if (cmp.gt($Rt32,$Ns8.new)) jump:t $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_6730375, PredRel {
+tc_3e61d314, TypeNCJ>, Enc_5de85f, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{19-19} = 0b0;
@@ -7828,7 +7882,7 @@ def J4_cmpltu_f_jumpnv_nt : HInst<
(outs),
(ins IntRegs:$Rt32, IntRegs:$Ns8, b30_2Imm:$Ii),
"if (!cmp.gtu($Rt32,$Ns8.new)) jump:nt $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_6730375, PredRel {
+tc_3e61d314, TypeNCJ>, Enc_5de85f, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{19-19} = 0b0;
@@ -7853,7 +7907,7 @@ def J4_cmpltu_f_jumpnv_t : HInst<
(outs),
(ins IntRegs:$Rt32, IntRegs:$Ns8, b30_2Imm:$Ii),
"if (!cmp.gtu($Rt32,$Ns8.new)) jump:t $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_6730375, PredRel {
+tc_3e61d314, TypeNCJ>, Enc_5de85f, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{19-19} = 0b0;
@@ -7878,7 +7932,7 @@ def J4_cmpltu_t_jumpnv_nt : HInst<
(outs),
(ins IntRegs:$Rt32, IntRegs:$Ns8, b30_2Imm:$Ii),
"if (cmp.gtu($Rt32,$Ns8.new)) jump:nt $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_6730375, PredRel {
+tc_3e61d314, TypeNCJ>, Enc_5de85f, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{19-19} = 0b0;
@@ -7902,7 +7956,7 @@ def J4_cmpltu_t_jumpnv_t : HInst<
(outs),
(ins IntRegs:$Rt32, IntRegs:$Ns8, b30_2Imm:$Ii),
"if (cmp.gtu($Rt32,$Ns8.new)) jump:t $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_6730375, PredRel {
+tc_3e61d314, TypeNCJ>, Enc_5de85f, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{19-19} = 0b0;
@@ -7926,7 +7980,7 @@ def J4_hintjumpr : HInst<
(outs),
(ins IntRegs:$Rs32),
"hintjr($Rs32)",
-J_tc_2early_SLOT2, TypeJ>, Enc_11704059 {
+tc_b08b653e, TypeJ>, Enc_ecbcc8 {
let Inst{13-0} = 0b00000000000000;
let Inst{31-21} = 0b01010010101;
let isTerminator = 1;
@@ -7938,7 +7992,7 @@ def J4_jumpseti : HInst<
(outs GeneralSubRegs:$Rd16),
(ins u6_0Imm:$II, b30_2Imm:$Ii),
"$Rd16 = #$II ; jump $Ii",
-COMPOUND, TypeCJ>, Enc_4834775 {
+tc_1e062b18, TypeCJ>, Enc_9e4c3f {
let Inst{0-0} = 0b0;
let Inst{31-22} = 0b0001011000;
let hasNewValue = 1;
@@ -7956,7 +8010,7 @@ def J4_jumpsetr : HInst<
(outs GeneralSubRegs:$Rd16),
(ins GeneralSubRegs:$Rs16, b30_2Imm:$Ii),
"$Rd16 = $Rs16 ; jump $Ii",
-COMPOUND, TypeCJ>, Enc_2639299 {
+tc_1e062b18, TypeCJ>, Enc_66bce1 {
let Inst{0-0} = 0b0;
let Inst{13-12} = 0b00;
let Inst{31-22} = 0b0001011100;
@@ -7975,7 +8029,7 @@ def J4_tstbit0_f_jumpnv_nt : HInst<
(outs),
(ins IntRegs:$Ns8, b30_2Imm:$Ii),
"if (!tstbit($Ns8.new,#0)) jump:nt $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_1898420 {
+tc_dbe218dd, TypeNCJ>, Enc_69d63b {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b000000;
let Inst{19-19} = 0b0;
@@ -7999,7 +8053,7 @@ def J4_tstbit0_f_jumpnv_t : HInst<
(outs),
(ins IntRegs:$Ns8, b30_2Imm:$Ii),
"if (!tstbit($Ns8.new,#0)) jump:t $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_1898420 {
+tc_dbe218dd, TypeNCJ>, Enc_69d63b {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b100000;
let Inst{19-19} = 0b0;
@@ -8023,7 +8077,7 @@ def J4_tstbit0_fp0_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, b30_2Imm:$Ii),
"p0 = tstbit($Rs16,#0); if (!p0.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_12829314 {
+tc_eb07ef6f, TypeCJ>, Enc_ad1c74 {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b000011;
let Inst{31-22} = 0b0001000111;
@@ -8045,7 +8099,7 @@ def J4_tstbit0_fp0_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, b30_2Imm:$Ii),
"p0 = tstbit($Rs16,#0); if (!p0.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_12829314 {
+tc_eb07ef6f, TypeCJ>, Enc_ad1c74 {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b100011;
let Inst{31-22} = 0b0001000111;
@@ -8067,7 +8121,7 @@ def J4_tstbit0_fp1_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, b30_2Imm:$Ii),
"p1 = tstbit($Rs16,#0); if (!p1.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_12829314 {
+tc_eb07ef6f, TypeCJ>, Enc_ad1c74 {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b000011;
let Inst{31-22} = 0b0001001111;
@@ -8089,7 +8143,7 @@ def J4_tstbit0_fp1_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, b30_2Imm:$Ii),
"p1 = tstbit($Rs16,#0); if (!p1.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_12829314 {
+tc_eb07ef6f, TypeCJ>, Enc_ad1c74 {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b100011;
let Inst{31-22} = 0b0001001111;
@@ -8111,7 +8165,7 @@ def J4_tstbit0_t_jumpnv_nt : HInst<
(outs),
(ins IntRegs:$Ns8, b30_2Imm:$Ii),
"if (tstbit($Ns8.new,#0)) jump:nt $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_1898420 {
+tc_dbe218dd, TypeNCJ>, Enc_69d63b {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b000000;
let Inst{19-19} = 0b0;
@@ -8134,7 +8188,7 @@ def J4_tstbit0_t_jumpnv_t : HInst<
(outs),
(ins IntRegs:$Ns8, b30_2Imm:$Ii),
"if (tstbit($Ns8.new,#0)) jump:t $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_1898420 {
+tc_dbe218dd, TypeNCJ>, Enc_69d63b {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b100000;
let Inst{19-19} = 0b0;
@@ -8157,7 +8211,7 @@ def J4_tstbit0_tp0_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, b30_2Imm:$Ii),
"p0 = tstbit($Rs16,#0); if (p0.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_12829314 {
+tc_eb07ef6f, TypeCJ>, Enc_ad1c74 {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b000011;
let Inst{31-22} = 0b0001000110;
@@ -8178,7 +8232,7 @@ def J4_tstbit0_tp0_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, b30_2Imm:$Ii),
"p0 = tstbit($Rs16,#0); if (p0.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_12829314 {
+tc_eb07ef6f, TypeCJ>, Enc_ad1c74 {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b100011;
let Inst{31-22} = 0b0001000110;
@@ -8199,7 +8253,7 @@ def J4_tstbit0_tp1_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, b30_2Imm:$Ii),
"p1 = tstbit($Rs16,#0); if (p1.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_12829314 {
+tc_eb07ef6f, TypeCJ>, Enc_ad1c74 {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b000011;
let Inst{31-22} = 0b0001001110;
@@ -8220,7 +8274,7 @@ def J4_tstbit0_tp1_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, b30_2Imm:$Ii),
"p1 = tstbit($Rs16,#0); if (p1.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_12829314 {
+tc_eb07ef6f, TypeCJ>, Enc_ad1c74 {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b100011;
let Inst{31-22} = 0b0001001110;
@@ -8241,7 +8295,7 @@ def L2_deallocframe : HInst<
(outs),
(ins),
"deallocframe",
-LD_tc_ld_SLOT01, TypeLD>, Enc_0 {
+tc_c1dbc916, TypeLD>, Enc_3a3d62 {
let Inst{4-0} = 0b11110;
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b10010000000;
@@ -8255,7 +8309,7 @@ def L2_loadalignb_io : HInst<
(outs DoubleRegs:$Ryy32),
(ins DoubleRegs:$Ryy32in, IntRegs:$Rs32, s32_0Imm:$Ii),
"$Ryy32 = memb_fifo($Rs32+#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_449439 {
+tc_14da557c, TypeLD>, Enc_a27588 {
let Inst{24-21} = 0b0100;
let Inst{31-27} = 0b10010;
let addrMode = BaseImmOffset;
@@ -8272,7 +8326,7 @@ def L2_loadalignb_pbr : HInst<
(outs DoubleRegs:$Ryy32, IntRegs:$Rx32),
(ins DoubleRegs:$Ryy32in, IntRegs:$Rx32in, ModRegs:$Mu2),
"$Ryy32 = memb_fifo($Rx32++$Mu2:brev)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_12261611 {
+tc_ae762521, TypeLD>, Enc_1f5d8f {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b10011110100;
let accessSize = ByteAccess;
@@ -8283,7 +8337,7 @@ def L2_loadalignb_pci : HInst<
(outs DoubleRegs:$Ryy32, IntRegs:$Rx32),
(ins DoubleRegs:$Ryy32in, IntRegs:$Rx32in, s4_0Imm:$Ii, ModRegs:$Mu2),
"$Ryy32 = memb_fifo($Rx32++#$Ii:circ($Mu2))",
-LD_tc_ld_SLOT01, TypeLD>, Enc_971347 {
+tc_d2a33af5, TypeLD>, Enc_74aef2 {
let Inst{12-9} = 0b0000;
let Inst{31-21} = 0b10011000100;
let addrMode = PostInc;
@@ -8296,7 +8350,7 @@ def L2_loadalignb_pcr : HInst<
(outs DoubleRegs:$Ryy32, IntRegs:$Rx32),
(ins DoubleRegs:$Ryy32in, IntRegs:$Rx32in, ModRegs:$Mu2),
"$Ryy32 = memb_fifo($Rx32++I:circ($Mu2))",
-LD_tc_ld_SLOT01, TypeLD>, Enc_12261611 {
+tc_ae762521, TypeLD>, Enc_1f5d8f {
let Inst{12-5} = 0b00010000;
let Inst{31-21} = 0b10011000100;
let addrMode = PostInc;
@@ -8309,7 +8363,7 @@ def L2_loadalignb_pi : HInst<
(outs DoubleRegs:$Ryy32, IntRegs:$Rx32),
(ins DoubleRegs:$Ryy32in, IntRegs:$Rx32in, s4_0Imm:$Ii),
"$Ryy32 = memb_fifo($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_6372758 {
+tc_ae762521, TypeLD>, Enc_6b197f {
let Inst{13-9} = 0b00000;
let Inst{31-21} = 0b10011010100;
let addrMode = PostInc;
@@ -8321,7 +8375,7 @@ def L2_loadalignb_pr : HInst<
(outs DoubleRegs:$Ryy32, IntRegs:$Rx32),
(ins DoubleRegs:$Ryy32in, IntRegs:$Rx32in, ModRegs:$Mu2),
"$Ryy32 = memb_fifo($Rx32++$Mu2)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_12261611 {
+tc_ae762521, TypeLD>, Enc_1f5d8f {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b10011100100;
let addrMode = PostInc;
@@ -8333,7 +8387,7 @@ def L2_loadalignb_zomap : HInst<
(outs DoubleRegs:$Ryy32),
(ins DoubleRegs:$Ryy32in, IntRegs:$Rs32),
"$Ryy32 = memb_fifo($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_14da557c, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
let Constraints = "$Ryy32 = $Ryy32in";
@@ -8342,7 +8396,7 @@ def L2_loadalignh_io : HInst<
(outs DoubleRegs:$Ryy32),
(ins DoubleRegs:$Ryy32in, IntRegs:$Rs32, s31_1Imm:$Ii),
"$Ryy32 = memh_fifo($Rs32+#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_11930027 {
+tc_14da557c, TypeLD>, Enc_5cd7e9 {
let Inst{24-21} = 0b0010;
let Inst{31-27} = 0b10010;
let addrMode = BaseImmOffset;
@@ -8359,7 +8413,7 @@ def L2_loadalignh_pbr : HInst<
(outs DoubleRegs:$Ryy32, IntRegs:$Rx32),
(ins DoubleRegs:$Ryy32in, IntRegs:$Rx32in, ModRegs:$Mu2),
"$Ryy32 = memh_fifo($Rx32++$Mu2:brev)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_12261611 {
+tc_ae762521, TypeLD>, Enc_1f5d8f {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b10011110010;
let accessSize = HalfWordAccess;
@@ -8370,7 +8424,7 @@ def L2_loadalignh_pci : HInst<
(outs DoubleRegs:$Ryy32, IntRegs:$Rx32),
(ins DoubleRegs:$Ryy32in, IntRegs:$Rx32in, s4_1Imm:$Ii, ModRegs:$Mu2),
"$Ryy32 = memh_fifo($Rx32++#$Ii:circ($Mu2))",
-LD_tc_ld_SLOT01, TypeLD>, Enc_1971351 {
+tc_d2a33af5, TypeLD>, Enc_9e2e1c {
let Inst{12-9} = 0b0000;
let Inst{31-21} = 0b10011000010;
let addrMode = PostInc;
@@ -8383,7 +8437,7 @@ def L2_loadalignh_pcr : HInst<
(outs DoubleRegs:$Ryy32, IntRegs:$Rx32),
(ins DoubleRegs:$Ryy32in, IntRegs:$Rx32in, ModRegs:$Mu2),
"$Ryy32 = memh_fifo($Rx32++I:circ($Mu2))",
-LD_tc_ld_SLOT01, TypeLD>, Enc_12261611 {
+tc_ae762521, TypeLD>, Enc_1f5d8f {
let Inst{12-5} = 0b00010000;
let Inst{31-21} = 0b10011000010;
let addrMode = PostInc;
@@ -8396,7 +8450,7 @@ def L2_loadalignh_pi : HInst<
(outs DoubleRegs:$Ryy32, IntRegs:$Rx32),
(ins DoubleRegs:$Ryy32in, IntRegs:$Rx32in, s4_1Imm:$Ii),
"$Ryy32 = memh_fifo($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_3372766 {
+tc_ae762521, TypeLD>, Enc_bd1cbc {
let Inst{13-9} = 0b00000;
let Inst{31-21} = 0b10011010010;
let addrMode = PostInc;
@@ -8408,7 +8462,7 @@ def L2_loadalignh_pr : HInst<
(outs DoubleRegs:$Ryy32, IntRegs:$Rx32),
(ins DoubleRegs:$Ryy32in, IntRegs:$Rx32in, ModRegs:$Mu2),
"$Ryy32 = memh_fifo($Rx32++$Mu2)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_12261611 {
+tc_ae762521, TypeLD>, Enc_1f5d8f {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b10011100010;
let addrMode = PostInc;
@@ -8420,7 +8474,7 @@ def L2_loadalignh_zomap : HInst<
(outs DoubleRegs:$Ryy32),
(ins DoubleRegs:$Ryy32in, IntRegs:$Rs32),
"$Ryy32 = memh_fifo($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_14da557c, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
let Constraints = "$Ryy32 = $Ryy32in";
@@ -8429,7 +8483,7 @@ def L2_loadbsw2_io : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, s31_1Imm:$Ii),
"$Rd32 = membh($Rs32+#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_15275738 {
+tc_bf6fa601, TypeLD>, Enc_de0214 {
let Inst{24-21} = 0b0001;
let Inst{31-27} = 0b10010;
let hasNewValue = 1;
@@ -8447,7 +8501,7 @@ def L2_loadbsw2_pbr : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rd32 = membh($Rx32++$Mu2:brev)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_48594 {
+tc_65dc7cc4, TypeLD>, Enc_74d4e5 {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b10011110001;
let hasNewValue = 1;
@@ -8460,7 +8514,7 @@ def L2_loadbsw2_pci : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_1Imm:$Ii, ModRegs:$Mu2),
"$Rd32 = membh($Rx32++#$Ii:circ($Mu2))",
-LD_tc_ld_SLOT01, TypeLD>, Enc_13303422 {
+tc_3eab77bd, TypeLD>, Enc_e83554 {
let Inst{12-9} = 0b0000;
let Inst{31-21} = 0b10011000001;
let hasNewValue = 1;
@@ -8475,7 +8529,7 @@ def L2_loadbsw2_pcr : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rd32 = membh($Rx32++I:circ($Mu2))",
-LD_tc_ld_SLOT01, TypeLD>, Enc_48594 {
+tc_65dc7cc4, TypeLD>, Enc_74d4e5 {
let Inst{12-5} = 0b00010000;
let Inst{31-21} = 0b10011000001;
let hasNewValue = 1;
@@ -8490,7 +8544,7 @@ def L2_loadbsw2_pi : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_1Imm:$Ii),
"$Rd32 = membh($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_15376009 {
+tc_65dc7cc4, TypeLD>, Enc_152467 {
let Inst{13-9} = 0b00000;
let Inst{31-21} = 0b10011010001;
let hasNewValue = 1;
@@ -8504,7 +8558,7 @@ def L2_loadbsw2_pr : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rd32 = membh($Rx32++$Mu2)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_48594 {
+tc_65dc7cc4, TypeLD>, Enc_74d4e5 {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b10011100001;
let hasNewValue = 1;
@@ -8518,7 +8572,7 @@ def L2_loadbsw2_zomap : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = membh($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_bf6fa601, TypeMAPPING> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -8528,7 +8582,7 @@ def L2_loadbsw4_io : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, s30_2Imm:$Ii),
"$Rdd32 = membh($Rs32+#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_9852473 {
+tc_bf6fa601, TypeLD>, Enc_2d7491 {
let Inst{24-21} = 0b0111;
let Inst{31-27} = 0b10010;
let addrMode = BaseImmOffset;
@@ -8544,7 +8598,7 @@ def L2_loadbsw4_pbr : HInst<
(outs DoubleRegs:$Rdd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rdd32 = membh($Rx32++$Mu2:brev)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_2901241 {
+tc_65dc7cc4, TypeLD>, Enc_7eee72 {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b10011110111;
let accessSize = WordAccess;
@@ -8555,7 +8609,7 @@ def L2_loadbsw4_pci : HInst<
(outs DoubleRegs:$Rdd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_2Imm:$Ii, ModRegs:$Mu2),
"$Rdd32 = membh($Rx32++#$Ii:circ($Mu2))",
-LD_tc_ld_SLOT01, TypeLD>, Enc_3931661 {
+tc_3eab77bd, TypeLD>, Enc_70b24b {
let Inst{12-9} = 0b0000;
let Inst{31-21} = 0b10011000111;
let addrMode = PostInc;
@@ -8568,7 +8622,7 @@ def L2_loadbsw4_pcr : HInst<
(outs DoubleRegs:$Rdd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rdd32 = membh($Rx32++I:circ($Mu2))",
-LD_tc_ld_SLOT01, TypeLD>, Enc_2901241 {
+tc_65dc7cc4, TypeLD>, Enc_7eee72 {
let Inst{12-5} = 0b00010000;
let Inst{31-21} = 0b10011000111;
let addrMode = PostInc;
@@ -8581,7 +8635,7 @@ def L2_loadbsw4_pi : HInst<
(outs DoubleRegs:$Rdd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_2Imm:$Ii),
"$Rdd32 = membh($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_8752140 {
+tc_65dc7cc4, TypeLD>, Enc_71f1b4 {
let Inst{13-9} = 0b00000;
let Inst{31-21} = 0b10011010111;
let addrMode = PostInc;
@@ -8593,7 +8647,7 @@ def L2_loadbsw4_pr : HInst<
(outs DoubleRegs:$Rdd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rdd32 = membh($Rx32++$Mu2)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_2901241 {
+tc_65dc7cc4, TypeLD>, Enc_7eee72 {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b10011100111;
let addrMode = PostInc;
@@ -8605,7 +8659,7 @@ def L2_loadbsw4_zomap : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32),
"$Rdd32 = membh($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_bf6fa601, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -8613,7 +8667,7 @@ def L2_loadbzw2_io : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, s31_1Imm:$Ii),
"$Rd32 = memubh($Rs32+#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_15275738 {
+tc_bf6fa601, TypeLD>, Enc_de0214 {
let Inst{24-21} = 0b0011;
let Inst{31-27} = 0b10010;
let hasNewValue = 1;
@@ -8631,7 +8685,7 @@ def L2_loadbzw2_pbr : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rd32 = memubh($Rx32++$Mu2:brev)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_48594 {
+tc_65dc7cc4, TypeLD>, Enc_74d4e5 {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b10011110011;
let hasNewValue = 1;
@@ -8644,7 +8698,7 @@ def L2_loadbzw2_pci : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_1Imm:$Ii, ModRegs:$Mu2),
"$Rd32 = memubh($Rx32++#$Ii:circ($Mu2))",
-LD_tc_ld_SLOT01, TypeLD>, Enc_13303422 {
+tc_3eab77bd, TypeLD>, Enc_e83554 {
let Inst{12-9} = 0b0000;
let Inst{31-21} = 0b10011000011;
let hasNewValue = 1;
@@ -8659,7 +8713,7 @@ def L2_loadbzw2_pcr : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rd32 = memubh($Rx32++I:circ($Mu2))",
-LD_tc_ld_SLOT01, TypeLD>, Enc_48594 {
+tc_65dc7cc4, TypeLD>, Enc_74d4e5 {
let Inst{12-5} = 0b00010000;
let Inst{31-21} = 0b10011000011;
let hasNewValue = 1;
@@ -8674,7 +8728,7 @@ def L2_loadbzw2_pi : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_1Imm:$Ii),
"$Rd32 = memubh($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_15376009 {
+tc_65dc7cc4, TypeLD>, Enc_152467 {
let Inst{13-9} = 0b00000;
let Inst{31-21} = 0b10011010011;
let hasNewValue = 1;
@@ -8688,7 +8742,7 @@ def L2_loadbzw2_pr : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rd32 = memubh($Rx32++$Mu2)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_48594 {
+tc_65dc7cc4, TypeLD>, Enc_74d4e5 {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b10011100011;
let hasNewValue = 1;
@@ -8702,7 +8756,7 @@ def L2_loadbzw2_zomap : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = memubh($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_bf6fa601, TypeMAPPING> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -8712,7 +8766,7 @@ def L2_loadbzw4_io : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, s30_2Imm:$Ii),
"$Rdd32 = memubh($Rs32+#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_9852473 {
+tc_bf6fa601, TypeLD>, Enc_2d7491 {
let Inst{24-21} = 0b0101;
let Inst{31-27} = 0b10010;
let addrMode = BaseImmOffset;
@@ -8728,7 +8782,7 @@ def L2_loadbzw4_pbr : HInst<
(outs DoubleRegs:$Rdd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rdd32 = memubh($Rx32++$Mu2:brev)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_2901241 {
+tc_65dc7cc4, TypeLD>, Enc_7eee72 {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b10011110101;
let accessSize = WordAccess;
@@ -8739,7 +8793,7 @@ def L2_loadbzw4_pci : HInst<
(outs DoubleRegs:$Rdd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_2Imm:$Ii, ModRegs:$Mu2),
"$Rdd32 = memubh($Rx32++#$Ii:circ($Mu2))",
-LD_tc_ld_SLOT01, TypeLD>, Enc_3931661 {
+tc_3eab77bd, TypeLD>, Enc_70b24b {
let Inst{12-9} = 0b0000;
let Inst{31-21} = 0b10011000101;
let addrMode = PostInc;
@@ -8752,7 +8806,7 @@ def L2_loadbzw4_pcr : HInst<
(outs DoubleRegs:$Rdd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rdd32 = memubh($Rx32++I:circ($Mu2))",
-LD_tc_ld_SLOT01, TypeLD>, Enc_2901241 {
+tc_65dc7cc4, TypeLD>, Enc_7eee72 {
let Inst{12-5} = 0b00010000;
let Inst{31-21} = 0b10011000101;
let addrMode = PostInc;
@@ -8765,7 +8819,7 @@ def L2_loadbzw4_pi : HInst<
(outs DoubleRegs:$Rdd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_2Imm:$Ii),
"$Rdd32 = memubh($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_8752140 {
+tc_65dc7cc4, TypeLD>, Enc_71f1b4 {
let Inst{13-9} = 0b00000;
let Inst{31-21} = 0b10011010101;
let addrMode = PostInc;
@@ -8777,7 +8831,7 @@ def L2_loadbzw4_pr : HInst<
(outs DoubleRegs:$Rdd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rdd32 = memubh($Rx32++$Mu2)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_2901241 {
+tc_65dc7cc4, TypeLD>, Enc_7eee72 {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b10011100101;
let addrMode = PostInc;
@@ -8789,7 +8843,7 @@ def L2_loadbzw4_zomap : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32),
"$Rdd32 = memubh($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_bf6fa601, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -8797,7 +8851,7 @@ def L2_loadrb_io : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, s32_0Imm:$Ii),
"$Rd32 = memb($Rs32+#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_14461004, AddrModeRel {
+tc_bf6fa601, TypeLD>, Enc_211aaa, AddrModeRel {
let Inst{24-21} = 0b1000;
let Inst{31-27} = 0b10010;
let hasNewValue = 1;
@@ -8818,7 +8872,7 @@ def L2_loadrb_pbr : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rd32 = memb($Rx32++$Mu2:brev)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_48594 {
+tc_65dc7cc4, TypeLD>, Enc_74d4e5 {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b10011111000;
let hasNewValue = 1;
@@ -8831,7 +8885,7 @@ def L2_loadrb_pci : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_0Imm:$Ii, ModRegs:$Mu2),
"$Rd32 = memb($Rx32++#$Ii:circ($Mu2))",
-LD_tc_ld_SLOT01, TypeLD>, Enc_16303398 {
+tc_3eab77bd, TypeLD>, Enc_e0a47a {
let Inst{12-9} = 0b0000;
let Inst{31-21} = 0b10011001000;
let hasNewValue = 1;
@@ -8846,7 +8900,7 @@ def L2_loadrb_pcr : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rd32 = memb($Rx32++I:circ($Mu2))",
-LD_tc_ld_SLOT01, TypeLD>, Enc_48594 {
+tc_65dc7cc4, TypeLD>, Enc_74d4e5 {
let Inst{12-5} = 0b00010000;
let Inst{31-21} = 0b10011001000;
let hasNewValue = 1;
@@ -8861,7 +8915,7 @@ def L2_loadrb_pi : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_0Imm:$Ii),
"$Rd32 = memb($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_5598813, PredNewRel {
+tc_65dc7cc4, TypeLD>, Enc_222336, PredNewRel {
let Inst{13-9} = 0b00000;
let Inst{31-21} = 0b10011011000;
let hasNewValue = 1;
@@ -8877,7 +8931,7 @@ def L2_loadrb_pr : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rd32 = memb($Rx32++$Mu2)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_48594 {
+tc_65dc7cc4, TypeLD>, Enc_74d4e5 {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b10011101000;
let hasNewValue = 1;
@@ -8891,7 +8945,7 @@ def L2_loadrb_zomap : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = memb($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_bf6fa601, TypeMAPPING> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -8901,7 +8955,7 @@ def L2_loadrbgp : HInst<
(outs IntRegs:$Rd32),
(ins u32_0Imm:$Ii),
"$Rd32 = memb(gp+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_1886960, AddrModeRel {
+tc_70cabf66, TypeV2LDST>, Enc_25bef0, AddrModeRel {
let Inst{24-21} = 0b1000;
let Inst{31-27} = 0b01001;
let hasNewValue = 1;
@@ -8920,7 +8974,7 @@ def L2_loadrd_io : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, s29_3Imm:$Ii),
"$Rdd32 = memd($Rs32+#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_163381, AddrModeRel {
+tc_bf6fa601, TypeLD>, Enc_fa3ba4, AddrModeRel {
let Inst{24-21} = 0b1110;
let Inst{31-27} = 0b10010;
let addrMode = BaseImmOffset;
@@ -8939,7 +8993,7 @@ def L2_loadrd_pbr : HInst<
(outs DoubleRegs:$Rdd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rdd32 = memd($Rx32++$Mu2:brev)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_2901241 {
+tc_65dc7cc4, TypeLD>, Enc_7eee72 {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b10011111110;
let accessSize = DoubleWordAccess;
@@ -8950,7 +9004,7 @@ def L2_loadrd_pci : HInst<
(outs DoubleRegs:$Rdd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_3Imm:$Ii, ModRegs:$Mu2),
"$Rdd32 = memd($Rx32++#$Ii:circ($Mu2))",
-LD_tc_ld_SLOT01, TypeLD>, Enc_931653 {
+tc_3eab77bd, TypeLD>, Enc_b05839 {
let Inst{12-9} = 0b0000;
let Inst{31-21} = 0b10011001110;
let addrMode = PostInc;
@@ -8963,7 +9017,7 @@ def L2_loadrd_pcr : HInst<
(outs DoubleRegs:$Rdd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rdd32 = memd($Rx32++I:circ($Mu2))",
-LD_tc_ld_SLOT01, TypeLD>, Enc_2901241 {
+tc_65dc7cc4, TypeLD>, Enc_7eee72 {
let Inst{12-5} = 0b00010000;
let Inst{31-21} = 0b10011001110;
let addrMode = PostInc;
@@ -8976,7 +9030,7 @@ def L2_loadrd_pi : HInst<
(outs DoubleRegs:$Rdd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_3Imm:$Ii),
"$Rdd32 = memd($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_9752128, PredNewRel {
+tc_65dc7cc4, TypeLD>, Enc_5bdd42, PredNewRel {
let Inst{13-9} = 0b00000;
let Inst{31-21} = 0b10011011110;
let addrMode = PostInc;
@@ -8990,7 +9044,7 @@ def L2_loadrd_pr : HInst<
(outs DoubleRegs:$Rdd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rdd32 = memd($Rx32++$Mu2)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_2901241 {
+tc_65dc7cc4, TypeLD>, Enc_7eee72 {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b10011101110;
let addrMode = PostInc;
@@ -9002,7 +9056,7 @@ def L2_loadrd_zomap : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32),
"$Rdd32 = memd($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_bf6fa601, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -9010,7 +9064,7 @@ def L2_loadrdgp : HInst<
(outs DoubleRegs:$Rdd32),
(ins u29_3Imm:$Ii),
"$Rdd32 = memd(gp+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_4975051, AddrModeRel {
+tc_70cabf66, TypeV2LDST>, Enc_509701, AddrModeRel {
let Inst{24-21} = 0b1110;
let Inst{31-27} = 0b01001;
let accessSize = DoubleWordAccess;
@@ -9027,7 +9081,7 @@ def L2_loadrh_io : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, s31_1Imm:$Ii),
"$Rd32 = memh($Rs32+#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_15275738, AddrModeRel {
+tc_bf6fa601, TypeLD>, Enc_de0214, AddrModeRel {
let Inst{24-21} = 0b1010;
let Inst{31-27} = 0b10010;
let hasNewValue = 1;
@@ -9048,7 +9102,7 @@ def L2_loadrh_pbr : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rd32 = memh($Rx32++$Mu2:brev)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_48594 {
+tc_65dc7cc4, TypeLD>, Enc_74d4e5 {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b10011111010;
let hasNewValue = 1;
@@ -9061,7 +9115,7 @@ def L2_loadrh_pci : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_1Imm:$Ii, ModRegs:$Mu2),
"$Rd32 = memh($Rx32++#$Ii:circ($Mu2))",
-LD_tc_ld_SLOT01, TypeLD>, Enc_13303422 {
+tc_3eab77bd, TypeLD>, Enc_e83554 {
let Inst{12-9} = 0b0000;
let Inst{31-21} = 0b10011001010;
let hasNewValue = 1;
@@ -9076,7 +9130,7 @@ def L2_loadrh_pcr : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rd32 = memh($Rx32++I:circ($Mu2))",
-LD_tc_ld_SLOT01, TypeLD>, Enc_48594 {
+tc_65dc7cc4, TypeLD>, Enc_74d4e5 {
let Inst{12-5} = 0b00010000;
let Inst{31-21} = 0b10011001010;
let hasNewValue = 1;
@@ -9091,7 +9145,7 @@ def L2_loadrh_pi : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_1Imm:$Ii),
"$Rd32 = memh($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_15376009, PredNewRel {
+tc_65dc7cc4, TypeLD>, Enc_152467, PredNewRel {
let Inst{13-9} = 0b00000;
let Inst{31-21} = 0b10011011010;
let hasNewValue = 1;
@@ -9107,7 +9161,7 @@ def L2_loadrh_pr : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rd32 = memh($Rx32++$Mu2)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_48594 {
+tc_65dc7cc4, TypeLD>, Enc_74d4e5 {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b10011101010;
let hasNewValue = 1;
@@ -9121,7 +9175,7 @@ def L2_loadrh_zomap : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = memh($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_bf6fa601, TypeMAPPING> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -9131,7 +9185,7 @@ def L2_loadrhgp : HInst<
(outs IntRegs:$Rd32),
(ins u31_1Imm:$Ii),
"$Rd32 = memh(gp+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_12608570, AddrModeRel {
+tc_70cabf66, TypeV2LDST>, Enc_8df4be, AddrModeRel {
let Inst{24-21} = 0b1010;
let Inst{31-27} = 0b01001;
let hasNewValue = 1;
@@ -9150,7 +9204,7 @@ def L2_loadri_io : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, s30_2Imm:$Ii),
"$Rd32 = memw($Rs32+#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_8990840, AddrModeRel {
+tc_bf6fa601, TypeLD>, Enc_2a3787, AddrModeRel {
let Inst{24-21} = 0b1100;
let Inst{31-27} = 0b10010;
let hasNewValue = 1;
@@ -9171,7 +9225,7 @@ def L2_loadri_pbr : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rd32 = memw($Rx32++$Mu2:brev)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_48594 {
+tc_65dc7cc4, TypeLD>, Enc_74d4e5 {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b10011111100;
let hasNewValue = 1;
@@ -9184,7 +9238,7 @@ def L2_loadri_pci : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_2Imm:$Ii, ModRegs:$Mu2),
"$Rd32 = memw($Rx32++#$Ii:circ($Mu2))",
-LD_tc_ld_SLOT01, TypeLD>, Enc_14303394 {
+tc_3eab77bd, TypeLD>, Enc_27fd0e {
let Inst{12-9} = 0b0000;
let Inst{31-21} = 0b10011001100;
let hasNewValue = 1;
@@ -9199,7 +9253,7 @@ def L2_loadri_pcr : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rd32 = memw($Rx32++I:circ($Mu2))",
-LD_tc_ld_SLOT01, TypeLD>, Enc_48594 {
+tc_65dc7cc4, TypeLD>, Enc_74d4e5 {
let Inst{12-5} = 0b00010000;
let Inst{31-21} = 0b10011001100;
let hasNewValue = 1;
@@ -9214,7 +9268,7 @@ def L2_loadri_pi : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_2Imm:$Ii),
"$Rd32 = memw($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_16376009, PredNewRel {
+tc_65dc7cc4, TypeLD>, Enc_3d920a, PredNewRel {
let Inst{13-9} = 0b00000;
let Inst{31-21} = 0b10011011100;
let hasNewValue = 1;
@@ -9230,7 +9284,7 @@ def L2_loadri_pr : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rd32 = memw($Rx32++$Mu2)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_48594 {
+tc_65dc7cc4, TypeLD>, Enc_74d4e5 {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b10011101100;
let hasNewValue = 1;
@@ -9244,7 +9298,7 @@ def L2_loadri_zomap : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = memw($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_bf6fa601, TypeMAPPING> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -9254,7 +9308,7 @@ def L2_loadrigp : HInst<
(outs IntRegs:$Rd32),
(ins u30_2Imm:$Ii),
"$Rd32 = memw(gp+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_8814718, AddrModeRel {
+tc_70cabf66, TypeV2LDST>, Enc_4f4ed7, AddrModeRel {
let Inst{24-21} = 0b1100;
let Inst{31-27} = 0b01001;
let hasNewValue = 1;
@@ -9273,7 +9327,7 @@ def L2_loadrub_io : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, s32_0Imm:$Ii),
"$Rd32 = memub($Rs32+#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_14461004, AddrModeRel {
+tc_bf6fa601, TypeLD>, Enc_211aaa, AddrModeRel {
let Inst{24-21} = 0b1001;
let Inst{31-27} = 0b10010;
let hasNewValue = 1;
@@ -9294,7 +9348,7 @@ def L2_loadrub_pbr : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rd32 = memub($Rx32++$Mu2:brev)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_48594 {
+tc_65dc7cc4, TypeLD>, Enc_74d4e5 {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b10011111001;
let hasNewValue = 1;
@@ -9307,7 +9361,7 @@ def L2_loadrub_pci : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_0Imm:$Ii, ModRegs:$Mu2),
"$Rd32 = memub($Rx32++#$Ii:circ($Mu2))",
-LD_tc_ld_SLOT01, TypeLD>, Enc_16303398 {
+tc_3eab77bd, TypeLD>, Enc_e0a47a {
let Inst{12-9} = 0b0000;
let Inst{31-21} = 0b10011001001;
let hasNewValue = 1;
@@ -9322,7 +9376,7 @@ def L2_loadrub_pcr : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rd32 = memub($Rx32++I:circ($Mu2))",
-LD_tc_ld_SLOT01, TypeLD>, Enc_48594 {
+tc_65dc7cc4, TypeLD>, Enc_74d4e5 {
let Inst{12-5} = 0b00010000;
let Inst{31-21} = 0b10011001001;
let hasNewValue = 1;
@@ -9337,7 +9391,7 @@ def L2_loadrub_pi : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_0Imm:$Ii),
"$Rd32 = memub($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_5598813, PredNewRel {
+tc_65dc7cc4, TypeLD>, Enc_222336, PredNewRel {
let Inst{13-9} = 0b00000;
let Inst{31-21} = 0b10011011001;
let hasNewValue = 1;
@@ -9353,7 +9407,7 @@ def L2_loadrub_pr : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rd32 = memub($Rx32++$Mu2)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_48594 {
+tc_65dc7cc4, TypeLD>, Enc_74d4e5 {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b10011101001;
let hasNewValue = 1;
@@ -9367,7 +9421,7 @@ def L2_loadrub_zomap : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = memub($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_bf6fa601, TypeMAPPING> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -9377,7 +9431,7 @@ def L2_loadrubgp : HInst<
(outs IntRegs:$Rd32),
(ins u32_0Imm:$Ii),
"$Rd32 = memub(gp+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_1886960, AddrModeRel {
+tc_70cabf66, TypeV2LDST>, Enc_25bef0, AddrModeRel {
let Inst{24-21} = 0b1001;
let Inst{31-27} = 0b01001;
let hasNewValue = 1;
@@ -9396,7 +9450,7 @@ def L2_loadruh_io : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, s31_1Imm:$Ii),
"$Rd32 = memuh($Rs32+#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_15275738, AddrModeRel {
+tc_bf6fa601, TypeLD>, Enc_de0214, AddrModeRel {
let Inst{24-21} = 0b1011;
let Inst{31-27} = 0b10010;
let hasNewValue = 1;
@@ -9417,7 +9471,7 @@ def L2_loadruh_pbr : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rd32 = memuh($Rx32++$Mu2:brev)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_48594 {
+tc_65dc7cc4, TypeLD>, Enc_74d4e5 {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b10011111011;
let hasNewValue = 1;
@@ -9430,7 +9484,7 @@ def L2_loadruh_pci : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_1Imm:$Ii, ModRegs:$Mu2),
"$Rd32 = memuh($Rx32++#$Ii:circ($Mu2))",
-LD_tc_ld_SLOT01, TypeLD>, Enc_13303422 {
+tc_3eab77bd, TypeLD>, Enc_e83554 {
let Inst{12-9} = 0b0000;
let Inst{31-21} = 0b10011001011;
let hasNewValue = 1;
@@ -9445,7 +9499,7 @@ def L2_loadruh_pcr : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rd32 = memuh($Rx32++I:circ($Mu2))",
-LD_tc_ld_SLOT01, TypeLD>, Enc_48594 {
+tc_65dc7cc4, TypeLD>, Enc_74d4e5 {
let Inst{12-5} = 0b00010000;
let Inst{31-21} = 0b10011001011;
let hasNewValue = 1;
@@ -9460,7 +9514,7 @@ def L2_loadruh_pi : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_1Imm:$Ii),
"$Rd32 = memuh($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_15376009, PredNewRel {
+tc_65dc7cc4, TypeLD>, Enc_152467, PredNewRel {
let Inst{13-9} = 0b00000;
let Inst{31-21} = 0b10011011011;
let hasNewValue = 1;
@@ -9476,7 +9530,7 @@ def L2_loadruh_pr : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rd32 = memuh($Rx32++$Mu2)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_48594 {
+tc_65dc7cc4, TypeLD>, Enc_74d4e5 {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b10011101011;
let hasNewValue = 1;
@@ -9490,7 +9544,7 @@ def L2_loadruh_zomap : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = memuh($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_bf6fa601, TypeMAPPING> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -9500,7 +9554,7 @@ def L2_loadruhgp : HInst<
(outs IntRegs:$Rd32),
(ins u31_1Imm:$Ii),
"$Rd32 = memuh(gp+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_12608570, AddrModeRel {
+tc_70cabf66, TypeV2LDST>, Enc_8df4be, AddrModeRel {
let Inst{24-21} = 0b1011;
let Inst{31-27} = 0b01001;
let hasNewValue = 1;
@@ -9519,20 +9573,20 @@ def L2_loadw_locked : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = memw_locked($Rs32)",
-LD_tc_ld_SLOT0, TypeLD>, Enc_4075554 {
+tc_29c14515, TypeLD>, Enc_5e2823 {
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b10010010000;
let hasNewValue = 1;
let opNewValue = 0;
let accessSize = WordAccess;
-let isSoloAX = 1;
let mayLoad = 1;
+let isSoloAX = 1;
}
def L2_ploadrbf_io : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32, u32_0Imm:$Ii),
"if (!$Pt4) $Rd32 = memb($Rs32+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_4835423, AddrModeRel {
+tc_14da557c, TypeV2LDST>, Enc_a21d47, AddrModeRel {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01000101000;
let isPredicated = 1;
@@ -9554,7 +9608,7 @@ def L2_ploadrbf_pi : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins PredRegs:$Pt4, IntRegs:$Rx32in, s4_0Imm:$Ii),
"if (!$Pt4) $Rd32 = memb($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_12212978, PredNewRel {
+tc_ae762521, TypeLD>, Enc_f4413a, PredNewRel {
let Inst{13-11} = 0b101;
let Inst{31-21} = 0b10011011000;
let isPredicated = 1;
@@ -9571,7 +9625,7 @@ def L2_ploadrbf_zomap : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32),
"if (!$Pt4) $Rd32 = memb($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_14da557c, TypeMAPPING> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -9581,7 +9635,7 @@ def L2_ploadrbfnew_io : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32, u32_0Imm:$Ii),
"if (!$Pt4.new) $Rd32 = memb($Rs32+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_4835423, AddrModeRel {
+tc_65dc7cc4, TypeV2LDST>, Enc_a21d47, AddrModeRel {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01000111000;
let isPredicated = 1;
@@ -9604,7 +9658,7 @@ def L2_ploadrbfnew_pi : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins PredRegs:$Pt4, IntRegs:$Rx32in, s4_0Imm:$Ii),
"if (!$Pt4.new) $Rd32 = memb($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_12212978, PredNewRel {
+tc_e578178f, TypeLD>, Enc_f4413a, PredNewRel {
let Inst{13-11} = 0b111;
let Inst{31-21} = 0b10011011000;
let isPredicated = 1;
@@ -9622,7 +9676,7 @@ def L2_ploadrbfnew_zomap : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32),
"if (!$Pt4.new) $Rd32 = memb($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_65dc7cc4, TypeMAPPING> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -9632,7 +9686,7 @@ def L2_ploadrbt_io : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32, u32_0Imm:$Ii),
"if ($Pt4) $Rd32 = memb($Rs32+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_4835423, AddrModeRel {
+tc_14da557c, TypeV2LDST>, Enc_a21d47, AddrModeRel {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01000001000;
let isPredicated = 1;
@@ -9653,7 +9707,7 @@ def L2_ploadrbt_pi : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins PredRegs:$Pt4, IntRegs:$Rx32in, s4_0Imm:$Ii),
"if ($Pt4) $Rd32 = memb($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_12212978, PredNewRel {
+tc_ae762521, TypeLD>, Enc_f4413a, PredNewRel {
let Inst{13-11} = 0b100;
let Inst{31-21} = 0b10011011000;
let isPredicated = 1;
@@ -9669,7 +9723,7 @@ def L2_ploadrbt_zomap : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32),
"if ($Pt4) $Rd32 = memb($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_14da557c, TypeMAPPING> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -9679,7 +9733,7 @@ def L2_ploadrbtnew_io : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32, u32_0Imm:$Ii),
"if ($Pt4.new) $Rd32 = memb($Rs32+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_4835423, AddrModeRel {
+tc_65dc7cc4, TypeV2LDST>, Enc_a21d47, AddrModeRel {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01000011000;
let isPredicated = 1;
@@ -9701,7 +9755,7 @@ def L2_ploadrbtnew_pi : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins PredRegs:$Pt4, IntRegs:$Rx32in, s4_0Imm:$Ii),
"if ($Pt4.new) $Rd32 = memb($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_12212978, PredNewRel {
+tc_e578178f, TypeLD>, Enc_f4413a, PredNewRel {
let Inst{13-11} = 0b110;
let Inst{31-21} = 0b10011011000;
let isPredicated = 1;
@@ -9718,7 +9772,7 @@ def L2_ploadrbtnew_zomap : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32),
"if ($Pt4.new) $Rd32 = memb($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_65dc7cc4, TypeMAPPING> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -9728,7 +9782,7 @@ def L2_ploadrdf_io : HInst<
(outs DoubleRegs:$Rdd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32, u29_3Imm:$Ii),
"if (!$Pt4) $Rdd32 = memd($Rs32+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_677558, AddrModeRel {
+tc_14da557c, TypeV2LDST>, Enc_acd6ed, AddrModeRel {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01000101110;
let isPredicated = 1;
@@ -9748,7 +9802,7 @@ def L2_ploadrdf_pi : HInst<
(outs DoubleRegs:$Rdd32, IntRegs:$Rx32),
(ins PredRegs:$Pt4, IntRegs:$Rx32in, s4_3Imm:$Ii),
"if (!$Pt4) $Rdd32 = memd($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_5611087, PredNewRel {
+tc_ae762521, TypeLD>, Enc_9d1247, PredNewRel {
let Inst{13-11} = 0b101;
let Inst{31-21} = 0b10011011110;
let isPredicated = 1;
@@ -9763,7 +9817,7 @@ def L2_ploadrdf_zomap : HInst<
(outs DoubleRegs:$Rdd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32),
"if (!$Pt4) $Rdd32 = memd($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_14da557c, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -9771,7 +9825,7 @@ def L2_ploadrdfnew_io : HInst<
(outs DoubleRegs:$Rdd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32, u29_3Imm:$Ii),
"if (!$Pt4.new) $Rdd32 = memd($Rs32+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_677558, AddrModeRel {
+tc_65dc7cc4, TypeV2LDST>, Enc_acd6ed, AddrModeRel {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01000111110;
let isPredicated = 1;
@@ -9792,7 +9846,7 @@ def L2_ploadrdfnew_pi : HInst<
(outs DoubleRegs:$Rdd32, IntRegs:$Rx32),
(ins PredRegs:$Pt4, IntRegs:$Rx32in, s4_3Imm:$Ii),
"if (!$Pt4.new) $Rdd32 = memd($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_5611087, PredNewRel {
+tc_e578178f, TypeLD>, Enc_9d1247, PredNewRel {
let Inst{13-11} = 0b111;
let Inst{31-21} = 0b10011011110;
let isPredicated = 1;
@@ -9808,7 +9862,7 @@ def L2_ploadrdfnew_zomap : HInst<
(outs DoubleRegs:$Rdd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32),
"if (!$Pt4.new) $Rdd32 = memd($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_65dc7cc4, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -9816,7 +9870,7 @@ def L2_ploadrdt_io : HInst<
(outs DoubleRegs:$Rdd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32, u29_3Imm:$Ii),
"if ($Pt4) $Rdd32 = memd($Rs32+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_677558, AddrModeRel {
+tc_14da557c, TypeV2LDST>, Enc_acd6ed, AddrModeRel {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01000001110;
let isPredicated = 1;
@@ -9835,7 +9889,7 @@ def L2_ploadrdt_pi : HInst<
(outs DoubleRegs:$Rdd32, IntRegs:$Rx32),
(ins PredRegs:$Pt4, IntRegs:$Rx32in, s4_3Imm:$Ii),
"if ($Pt4) $Rdd32 = memd($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_5611087, PredNewRel {
+tc_ae762521, TypeLD>, Enc_9d1247, PredNewRel {
let Inst{13-11} = 0b100;
let Inst{31-21} = 0b10011011110;
let isPredicated = 1;
@@ -9849,7 +9903,7 @@ def L2_ploadrdt_zomap : HInst<
(outs DoubleRegs:$Rdd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32),
"if ($Pt4) $Rdd32 = memd($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_14da557c, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -9857,7 +9911,7 @@ def L2_ploadrdtnew_io : HInst<
(outs DoubleRegs:$Rdd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32, u29_3Imm:$Ii),
"if ($Pt4.new) $Rdd32 = memd($Rs32+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_677558, AddrModeRel {
+tc_65dc7cc4, TypeV2LDST>, Enc_acd6ed, AddrModeRel {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01000011110;
let isPredicated = 1;
@@ -9877,7 +9931,7 @@ def L2_ploadrdtnew_pi : HInst<
(outs DoubleRegs:$Rdd32, IntRegs:$Rx32),
(ins PredRegs:$Pt4, IntRegs:$Rx32in, s4_3Imm:$Ii),
"if ($Pt4.new) $Rdd32 = memd($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_5611087, PredNewRel {
+tc_e578178f, TypeLD>, Enc_9d1247, PredNewRel {
let Inst{13-11} = 0b110;
let Inst{31-21} = 0b10011011110;
let isPredicated = 1;
@@ -9892,7 +9946,7 @@ def L2_ploadrdtnew_zomap : HInst<
(outs DoubleRegs:$Rdd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32),
"if ($Pt4.new) $Rdd32 = memd($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_65dc7cc4, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -9900,7 +9954,7 @@ def L2_ploadrhf_io : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32, u31_1Imm:$Ii),
"if (!$Pt4) $Rd32 = memh($Rs32+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_1835415, AddrModeRel {
+tc_14da557c, TypeV2LDST>, Enc_a198f6, AddrModeRel {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01000101010;
let isPredicated = 1;
@@ -9922,7 +9976,7 @@ def L2_ploadrhf_pi : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins PredRegs:$Pt4, IntRegs:$Rx32in, s4_1Imm:$Ii),
"if (!$Pt4) $Rd32 = memh($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_7212930, PredNewRel {
+tc_ae762521, TypeLD>, Enc_733b27, PredNewRel {
let Inst{13-11} = 0b101;
let Inst{31-21} = 0b10011011010;
let isPredicated = 1;
@@ -9939,7 +9993,7 @@ def L2_ploadrhf_zomap : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32),
"if (!$Pt4) $Rd32 = memh($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_14da557c, TypeMAPPING> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -9949,7 +10003,7 @@ def L2_ploadrhfnew_io : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32, u31_1Imm:$Ii),
"if (!$Pt4.new) $Rd32 = memh($Rs32+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_1835415, AddrModeRel {
+tc_65dc7cc4, TypeV2LDST>, Enc_a198f6, AddrModeRel {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01000111010;
let isPredicated = 1;
@@ -9972,7 +10026,7 @@ def L2_ploadrhfnew_pi : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins PredRegs:$Pt4, IntRegs:$Rx32in, s4_1Imm:$Ii),
"if (!$Pt4.new) $Rd32 = memh($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_7212930, PredNewRel {
+tc_e578178f, TypeLD>, Enc_733b27, PredNewRel {
let Inst{13-11} = 0b111;
let Inst{31-21} = 0b10011011010;
let isPredicated = 1;
@@ -9990,7 +10044,7 @@ def L2_ploadrhfnew_zomap : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32),
"if (!$Pt4.new) $Rd32 = memh($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_65dc7cc4, TypeMAPPING> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -10000,7 +10054,7 @@ def L2_ploadrht_io : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32, u31_1Imm:$Ii),
"if ($Pt4) $Rd32 = memh($Rs32+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_1835415, AddrModeRel {
+tc_14da557c, TypeV2LDST>, Enc_a198f6, AddrModeRel {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01000001010;
let isPredicated = 1;
@@ -10021,7 +10075,7 @@ def L2_ploadrht_pi : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins PredRegs:$Pt4, IntRegs:$Rx32in, s4_1Imm:$Ii),
"if ($Pt4) $Rd32 = memh($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_7212930, PredNewRel {
+tc_ae762521, TypeLD>, Enc_733b27, PredNewRel {
let Inst{13-11} = 0b100;
let Inst{31-21} = 0b10011011010;
let isPredicated = 1;
@@ -10037,7 +10091,7 @@ def L2_ploadrht_zomap : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32),
"if ($Pt4) $Rd32 = memh($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_14da557c, TypeMAPPING> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -10047,7 +10101,7 @@ def L2_ploadrhtnew_io : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32, u31_1Imm:$Ii),
"if ($Pt4.new) $Rd32 = memh($Rs32+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_1835415, AddrModeRel {
+tc_65dc7cc4, TypeV2LDST>, Enc_a198f6, AddrModeRel {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01000011010;
let isPredicated = 1;
@@ -10069,7 +10123,7 @@ def L2_ploadrhtnew_pi : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins PredRegs:$Pt4, IntRegs:$Rx32in, s4_1Imm:$Ii),
"if ($Pt4.new) $Rd32 = memh($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_7212930, PredNewRel {
+tc_e578178f, TypeLD>, Enc_733b27, PredNewRel {
let Inst{13-11} = 0b110;
let Inst{31-21} = 0b10011011010;
let isPredicated = 1;
@@ -10086,7 +10140,7 @@ def L2_ploadrhtnew_zomap : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32),
"if ($Pt4.new) $Rd32 = memh($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_65dc7cc4, TypeMAPPING> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -10096,7 +10150,7 @@ def L2_ploadrif_io : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32, u30_2Imm:$Ii),
"if (!$Pt4) $Rd32 = memw($Rs32+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_2835415, AddrModeRel {
+tc_14da557c, TypeV2LDST>, Enc_f82eaf, AddrModeRel {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01000101100;
let isPredicated = 1;
@@ -10118,7 +10172,7 @@ def L2_ploadrif_pi : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins PredRegs:$Pt4, IntRegs:$Rx32in, s4_2Imm:$Ii),
"if (!$Pt4) $Rd32 = memw($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_6212930, PredNewRel {
+tc_ae762521, TypeLD>, Enc_b97f71, PredNewRel {
let Inst{13-11} = 0b101;
let Inst{31-21} = 0b10011011100;
let isPredicated = 1;
@@ -10135,7 +10189,7 @@ def L2_ploadrif_zomap : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32),
"if (!$Pt4) $Rd32 = memw($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_14da557c, TypeMAPPING> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -10145,7 +10199,7 @@ def L2_ploadrifnew_io : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32, u30_2Imm:$Ii),
"if (!$Pt4.new) $Rd32 = memw($Rs32+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_2835415, AddrModeRel {
+tc_65dc7cc4, TypeV2LDST>, Enc_f82eaf, AddrModeRel {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01000111100;
let isPredicated = 1;
@@ -10168,7 +10222,7 @@ def L2_ploadrifnew_pi : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins PredRegs:$Pt4, IntRegs:$Rx32in, s4_2Imm:$Ii),
"if (!$Pt4.new) $Rd32 = memw($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_6212930, PredNewRel {
+tc_e578178f, TypeLD>, Enc_b97f71, PredNewRel {
let Inst{13-11} = 0b111;
let Inst{31-21} = 0b10011011100;
let isPredicated = 1;
@@ -10186,7 +10240,7 @@ def L2_ploadrifnew_zomap : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32),
"if (!$Pt4.new) $Rd32 = memw($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_65dc7cc4, TypeMAPPING> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -10196,7 +10250,7 @@ def L2_ploadrit_io : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32, u30_2Imm:$Ii),
"if ($Pt4) $Rd32 = memw($Rs32+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_2835415, AddrModeRel {
+tc_14da557c, TypeV2LDST>, Enc_f82eaf, AddrModeRel {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01000001100;
let isPredicated = 1;
@@ -10217,7 +10271,7 @@ def L2_ploadrit_pi : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins PredRegs:$Pt4, IntRegs:$Rx32in, s4_2Imm:$Ii),
"if ($Pt4) $Rd32 = memw($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_6212930, PredNewRel {
+tc_ae762521, TypeLD>, Enc_b97f71, PredNewRel {
let Inst{13-11} = 0b100;
let Inst{31-21} = 0b10011011100;
let isPredicated = 1;
@@ -10233,7 +10287,7 @@ def L2_ploadrit_zomap : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32),
"if ($Pt4) $Rd32 = memw($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_14da557c, TypeMAPPING> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -10243,7 +10297,7 @@ def L2_ploadritnew_io : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32, u30_2Imm:$Ii),
"if ($Pt4.new) $Rd32 = memw($Rs32+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_2835415, AddrModeRel {
+tc_65dc7cc4, TypeV2LDST>, Enc_f82eaf, AddrModeRel {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01000011100;
let isPredicated = 1;
@@ -10265,7 +10319,7 @@ def L2_ploadritnew_pi : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins PredRegs:$Pt4, IntRegs:$Rx32in, s4_2Imm:$Ii),
"if ($Pt4.new) $Rd32 = memw($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_6212930, PredNewRel {
+tc_e578178f, TypeLD>, Enc_b97f71, PredNewRel {
let Inst{13-11} = 0b110;
let Inst{31-21} = 0b10011011100;
let isPredicated = 1;
@@ -10282,7 +10336,7 @@ def L2_ploadritnew_zomap : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32),
"if ($Pt4.new) $Rd32 = memw($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_65dc7cc4, TypeMAPPING> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -10292,7 +10346,7 @@ def L2_ploadrubf_io : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32, u32_0Imm:$Ii),
"if (!$Pt4) $Rd32 = memub($Rs32+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_4835423, AddrModeRel {
+tc_14da557c, TypeV2LDST>, Enc_a21d47, AddrModeRel {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01000101001;
let isPredicated = 1;
@@ -10314,7 +10368,7 @@ def L2_ploadrubf_pi : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins PredRegs:$Pt4, IntRegs:$Rx32in, s4_0Imm:$Ii),
"if (!$Pt4) $Rd32 = memub($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_12212978, PredNewRel {
+tc_ae762521, TypeLD>, Enc_f4413a, PredNewRel {
let Inst{13-11} = 0b101;
let Inst{31-21} = 0b10011011001;
let isPredicated = 1;
@@ -10331,7 +10385,7 @@ def L2_ploadrubf_zomap : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32),
"if (!$Pt4) $Rd32 = memub($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_14da557c, TypeMAPPING> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -10341,7 +10395,7 @@ def L2_ploadrubfnew_io : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32, u32_0Imm:$Ii),
"if (!$Pt4.new) $Rd32 = memub($Rs32+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_4835423, AddrModeRel {
+tc_65dc7cc4, TypeV2LDST>, Enc_a21d47, AddrModeRel {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01000111001;
let isPredicated = 1;
@@ -10364,7 +10418,7 @@ def L2_ploadrubfnew_pi : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins PredRegs:$Pt4, IntRegs:$Rx32in, s4_0Imm:$Ii),
"if (!$Pt4.new) $Rd32 = memub($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_12212978, PredNewRel {
+tc_e578178f, TypeLD>, Enc_f4413a, PredNewRel {
let Inst{13-11} = 0b111;
let Inst{31-21} = 0b10011011001;
let isPredicated = 1;
@@ -10382,7 +10436,7 @@ def L2_ploadrubfnew_zomap : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32),
"if (!$Pt4.new) $Rd32 = memub($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_65dc7cc4, TypeMAPPING> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -10392,7 +10446,7 @@ def L2_ploadrubt_io : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32, u32_0Imm:$Ii),
"if ($Pt4) $Rd32 = memub($Rs32+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_4835423, AddrModeRel {
+tc_14da557c, TypeV2LDST>, Enc_a21d47, AddrModeRel {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01000001001;
let isPredicated = 1;
@@ -10413,7 +10467,7 @@ def L2_ploadrubt_pi : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins PredRegs:$Pt4, IntRegs:$Rx32in, s4_0Imm:$Ii),
"if ($Pt4) $Rd32 = memub($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_12212978, PredNewRel {
+tc_ae762521, TypeLD>, Enc_f4413a, PredNewRel {
let Inst{13-11} = 0b100;
let Inst{31-21} = 0b10011011001;
let isPredicated = 1;
@@ -10429,7 +10483,7 @@ def L2_ploadrubt_zomap : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32),
"if ($Pt4) $Rd32 = memub($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_14da557c, TypeMAPPING> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -10439,7 +10493,7 @@ def L2_ploadrubtnew_io : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32, u32_0Imm:$Ii),
"if ($Pt4.new) $Rd32 = memub($Rs32+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_4835423, AddrModeRel {
+tc_65dc7cc4, TypeV2LDST>, Enc_a21d47, AddrModeRel {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01000011001;
let isPredicated = 1;
@@ -10461,7 +10515,7 @@ def L2_ploadrubtnew_pi : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins PredRegs:$Pt4, IntRegs:$Rx32in, s4_0Imm:$Ii),
"if ($Pt4.new) $Rd32 = memub($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_12212978, PredNewRel {
+tc_e578178f, TypeLD>, Enc_f4413a, PredNewRel {
let Inst{13-11} = 0b110;
let Inst{31-21} = 0b10011011001;
let isPredicated = 1;
@@ -10478,7 +10532,7 @@ def L2_ploadrubtnew_zomap : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32),
"if ($Pt4.new) $Rd32 = memub($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_65dc7cc4, TypeMAPPING> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -10488,7 +10542,7 @@ def L2_ploadruhf_io : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32, u31_1Imm:$Ii),
"if (!$Pt4) $Rd32 = memuh($Rs32+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_1835415, AddrModeRel {
+tc_14da557c, TypeV2LDST>, Enc_a198f6, AddrModeRel {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01000101011;
let isPredicated = 1;
@@ -10510,7 +10564,7 @@ def L2_ploadruhf_pi : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins PredRegs:$Pt4, IntRegs:$Rx32in, s4_1Imm:$Ii),
"if (!$Pt4) $Rd32 = memuh($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_7212930, PredNewRel {
+tc_ae762521, TypeLD>, Enc_733b27, PredNewRel {
let Inst{13-11} = 0b101;
let Inst{31-21} = 0b10011011011;
let isPredicated = 1;
@@ -10527,7 +10581,7 @@ def L2_ploadruhf_zomap : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32),
"if (!$Pt4) $Rd32 = memuh($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_14da557c, TypeMAPPING> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -10537,7 +10591,7 @@ def L2_ploadruhfnew_io : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32, u31_1Imm:$Ii),
"if (!$Pt4.new) $Rd32 = memuh($Rs32+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_1835415, AddrModeRel {
+tc_65dc7cc4, TypeV2LDST>, Enc_a198f6, AddrModeRel {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01000111011;
let isPredicated = 1;
@@ -10560,7 +10614,7 @@ def L2_ploadruhfnew_pi : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins PredRegs:$Pt4, IntRegs:$Rx32in, s4_1Imm:$Ii),
"if (!$Pt4.new) $Rd32 = memuh($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_7212930, PredNewRel {
+tc_e578178f, TypeLD>, Enc_733b27, PredNewRel {
let Inst{13-11} = 0b111;
let Inst{31-21} = 0b10011011011;
let isPredicated = 1;
@@ -10578,7 +10632,7 @@ def L2_ploadruhfnew_zomap : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32),
"if (!$Pt4.new) $Rd32 = memuh($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_65dc7cc4, TypeMAPPING> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -10588,7 +10642,7 @@ def L2_ploadruht_io : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32, u31_1Imm:$Ii),
"if ($Pt4) $Rd32 = memuh($Rs32+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_1835415, AddrModeRel {
+tc_14da557c, TypeV2LDST>, Enc_a198f6, AddrModeRel {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01000001011;
let isPredicated = 1;
@@ -10609,7 +10663,7 @@ def L2_ploadruht_pi : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins PredRegs:$Pt4, IntRegs:$Rx32in, s4_1Imm:$Ii),
"if ($Pt4) $Rd32 = memuh($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_7212930, PredNewRel {
+tc_ae762521, TypeLD>, Enc_733b27, PredNewRel {
let Inst{13-11} = 0b100;
let Inst{31-21} = 0b10011011011;
let isPredicated = 1;
@@ -10625,7 +10679,7 @@ def L2_ploadruht_zomap : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32),
"if ($Pt4) $Rd32 = memuh($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_14da557c, TypeMAPPING> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -10635,7 +10689,7 @@ def L2_ploadruhtnew_io : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32, u31_1Imm:$Ii),
"if ($Pt4.new) $Rd32 = memuh($Rs32+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_1835415, AddrModeRel {
+tc_65dc7cc4, TypeV2LDST>, Enc_a198f6, AddrModeRel {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01000011011;
let isPredicated = 1;
@@ -10657,7 +10711,7 @@ def L2_ploadruhtnew_pi : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins PredRegs:$Pt4, IntRegs:$Rx32in, s4_1Imm:$Ii),
"if ($Pt4.new) $Rd32 = memuh($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_7212930, PredNewRel {
+tc_e578178f, TypeLD>, Enc_733b27, PredNewRel {
let Inst{13-11} = 0b110;
let Inst{31-21} = 0b10011011011;
let isPredicated = 1;
@@ -10674,7 +10728,7 @@ def L2_ploadruhtnew_zomap : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32),
"if ($Pt4.new) $Rd32 = memuh($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_65dc7cc4, TypeMAPPING> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -10684,14 +10738,14 @@ def L4_add_memopb_io : HInst<
(outs),
(ins IntRegs:$Rs32, u32_0Imm:$Ii, IntRegs:$Rt32),
"memb($Rs32+#$Ii) += $Rt32",
-V4LDST_tc_st_SLOT0, TypeV4LDST>, Enc_11849200 {
+tc_a9c993d9, TypeV4LDST>, Enc_d44e31 {
let Inst{6-5} = 0b00;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00111110000;
let addrMode = BaseImmOffset;
let accessSize = ByteAccess;
-let mayStore = 1;
let mayLoad = 1;
+let mayStore = 1;
let isExtendable = 1;
let opExtendable = 1;
let isExtentSigned = 0;
@@ -10702,7 +10756,7 @@ def L4_add_memopb_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"memb($Rs32) += $Rt32",
-PSEUDO, TypeMAPPING> {
+tc_a9c993d9, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -10710,14 +10764,14 @@ def L4_add_memoph_io : HInst<
(outs),
(ins IntRegs:$Rs32, u31_1Imm:$Ii, IntRegs:$Rt32),
"memh($Rs32+#$Ii) += $Rt32",
-V4LDST_tc_st_SLOT0, TypeV4LDST>, Enc_8849208 {
+tc_a9c993d9, TypeV4LDST>, Enc_163a3c {
let Inst{6-5} = 0b00;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00111110001;
let addrMode = BaseImmOffset;
let accessSize = HalfWordAccess;
-let mayStore = 1;
let mayLoad = 1;
+let mayStore = 1;
let isExtendable = 1;
let opExtendable = 1;
let isExtentSigned = 0;
@@ -10728,7 +10782,7 @@ def L4_add_memoph_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"memh($Rs32) += $Rt32",
-PSEUDO, TypeMAPPING> {
+tc_a9c993d9, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -10736,14 +10790,14 @@ def L4_add_memopw_io : HInst<
(outs),
(ins IntRegs:$Rs32, u30_2Imm:$Ii, IntRegs:$Rt32),
"memw($Rs32+#$Ii) += $Rt32",
-V4LDST_tc_st_SLOT0, TypeV4LDST>, Enc_9849208 {
+tc_a9c993d9, TypeV4LDST>, Enc_226535 {
let Inst{6-5} = 0b00;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00111110010;
let addrMode = BaseImmOffset;
let accessSize = WordAccess;
-let mayStore = 1;
let mayLoad = 1;
+let mayStore = 1;
let isExtendable = 1;
let opExtendable = 1;
let isExtentSigned = 0;
@@ -10754,7 +10808,7 @@ def L4_add_memopw_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"memw($Rs32) += $Rt32",
-PSEUDO, TypeMAPPING> {
+tc_a9c993d9, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -10762,14 +10816,14 @@ def L4_and_memopb_io : HInst<
(outs),
(ins IntRegs:$Rs32, u32_0Imm:$Ii, IntRegs:$Rt32),
"memb($Rs32+#$Ii) &= $Rt32",
-V4LDST_tc_st_SLOT0, TypeV4LDST>, Enc_11849200 {
+tc_a9c993d9, TypeV4LDST>, Enc_d44e31 {
let Inst{6-5} = 0b10;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00111110000;
let addrMode = BaseImmOffset;
let accessSize = ByteAccess;
-let mayStore = 1;
let mayLoad = 1;
+let mayStore = 1;
let isExtendable = 1;
let opExtendable = 1;
let isExtentSigned = 0;
@@ -10780,7 +10834,7 @@ def L4_and_memopb_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"memb($Rs32) &= $Rt32",
-PSEUDO, TypeMAPPING> {
+tc_a9c993d9, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -10788,14 +10842,14 @@ def L4_and_memoph_io : HInst<
(outs),
(ins IntRegs:$Rs32, u31_1Imm:$Ii, IntRegs:$Rt32),
"memh($Rs32+#$Ii) &= $Rt32",
-V4LDST_tc_st_SLOT0, TypeV4LDST>, Enc_8849208 {
+tc_a9c993d9, TypeV4LDST>, Enc_163a3c {
let Inst{6-5} = 0b10;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00111110001;
let addrMode = BaseImmOffset;
let accessSize = HalfWordAccess;
-let mayStore = 1;
let mayLoad = 1;
+let mayStore = 1;
let isExtendable = 1;
let opExtendable = 1;
let isExtentSigned = 0;
@@ -10806,7 +10860,7 @@ def L4_and_memoph_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"memh($Rs32) &= $Rt32",
-PSEUDO, TypeMAPPING> {
+tc_a9c993d9, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -10814,14 +10868,14 @@ def L4_and_memopw_io : HInst<
(outs),
(ins IntRegs:$Rs32, u30_2Imm:$Ii, IntRegs:$Rt32),
"memw($Rs32+#$Ii) &= $Rt32",
-V4LDST_tc_st_SLOT0, TypeV4LDST>, Enc_9849208 {
+tc_a9c993d9, TypeV4LDST>, Enc_226535 {
let Inst{6-5} = 0b10;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00111110010;
let addrMode = BaseImmOffset;
let accessSize = WordAccess;
-let mayStore = 1;
let mayLoad = 1;
+let mayStore = 1;
let isExtendable = 1;
let opExtendable = 1;
let isExtentSigned = 0;
@@ -10832,7 +10886,7 @@ def L4_and_memopw_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"memw($Rs32) &= $Rt32",
-PSEUDO, TypeMAPPING> {
+tc_a9c993d9, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -10840,14 +10894,14 @@ def L4_iadd_memopb_io : HInst<
(outs),
(ins IntRegs:$Rs32, u32_0Imm:$Ii, u5_0Imm:$II),
"memb($Rs32+#$Ii) += #$II",
-V4LDST_tc_st_SLOT0, TypeV4LDST>, Enc_6773159 {
+tc_da79106e, TypeV4LDST>, Enc_46c951 {
let Inst{6-5} = 0b00;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00111111000;
let addrMode = BaseImmOffset;
let accessSize = ByteAccess;
-let mayStore = 1;
let mayLoad = 1;
+let mayStore = 1;
let isExtendable = 1;
let opExtendable = 1;
let isExtentSigned = 0;
@@ -10858,7 +10912,7 @@ def L4_iadd_memopb_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, u5_0Imm:$II),
"memb($Rs32) += #$II",
-PSEUDO, TypeMAPPING> {
+tc_da79106e, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -10866,14 +10920,14 @@ def L4_iadd_memoph_io : HInst<
(outs),
(ins IntRegs:$Rs32, u31_1Imm:$Ii, u5_0Imm:$II),
"memh($Rs32+#$Ii) += #$II",
-V4LDST_tc_st_SLOT0, TypeV4LDST>, Enc_9773167 {
+tc_da79106e, TypeV4LDST>, Enc_e66a97 {
let Inst{6-5} = 0b00;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00111111001;
let addrMode = BaseImmOffset;
let accessSize = HalfWordAccess;
-let mayStore = 1;
let mayLoad = 1;
+let mayStore = 1;
let isExtendable = 1;
let opExtendable = 1;
let isExtentSigned = 0;
@@ -10884,7 +10938,7 @@ def L4_iadd_memoph_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, u5_0Imm:$II),
"memh($Rs32) += #$II",
-PSEUDO, TypeMAPPING> {
+tc_da79106e, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -10892,14 +10946,14 @@ def L4_iadd_memopw_io : HInst<
(outs),
(ins IntRegs:$Rs32, u30_2Imm:$Ii, u5_0Imm:$II),
"memw($Rs32+#$Ii) += #$II",
-V4LDST_tc_st_SLOT0, TypeV4LDST>, Enc_8773155 {
+tc_da79106e, TypeV4LDST>, Enc_84b2cd {
let Inst{6-5} = 0b00;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00111111010;
let addrMode = BaseImmOffset;
let accessSize = WordAccess;
-let mayStore = 1;
let mayLoad = 1;
+let mayStore = 1;
let isExtendable = 1;
let opExtendable = 1;
let isExtentSigned = 0;
@@ -10910,7 +10964,7 @@ def L4_iadd_memopw_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, u5_0Imm:$II),
"memw($Rs32) += #$II",
-PSEUDO, TypeMAPPING> {
+tc_da79106e, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -10918,14 +10972,14 @@ def L4_iand_memopb_io : HInst<
(outs),
(ins IntRegs:$Rs32, u32_0Imm:$Ii, u5_0Imm:$II),
"memb($Rs32+#$Ii) = clrbit(#$II)",
-V4LDST_tc_st_SLOT0, TypeV4LDST>, Enc_6773159 {
+tc_da79106e, TypeV4LDST>, Enc_46c951 {
let Inst{6-5} = 0b10;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00111111000;
let addrMode = BaseImmOffset;
let accessSize = ByteAccess;
-let mayStore = 1;
let mayLoad = 1;
+let mayStore = 1;
let isExtendable = 1;
let opExtendable = 1;
let isExtentSigned = 0;
@@ -10936,7 +10990,7 @@ def L4_iand_memopb_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, u5_0Imm:$II),
"memb($Rs32) = clrbit(#$II)",
-PSEUDO, TypeMAPPING> {
+tc_da79106e, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -10944,14 +10998,14 @@ def L4_iand_memoph_io : HInst<
(outs),
(ins IntRegs:$Rs32, u31_1Imm:$Ii, u5_0Imm:$II),
"memh($Rs32+#$Ii) = clrbit(#$II)",
-V4LDST_tc_st_SLOT0, TypeV4LDST>, Enc_9773167 {
+tc_da79106e, TypeV4LDST>, Enc_e66a97 {
let Inst{6-5} = 0b10;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00111111001;
let addrMode = BaseImmOffset;
let accessSize = HalfWordAccess;
-let mayStore = 1;
let mayLoad = 1;
+let mayStore = 1;
let isExtendable = 1;
let opExtendable = 1;
let isExtentSigned = 0;
@@ -10962,7 +11016,7 @@ def L4_iand_memoph_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, u5_0Imm:$II),
"memh($Rs32) = clrbit(#$II)",
-PSEUDO, TypeMAPPING> {
+tc_da79106e, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -10970,14 +11024,14 @@ def L4_iand_memopw_io : HInst<
(outs),
(ins IntRegs:$Rs32, u30_2Imm:$Ii, u5_0Imm:$II),
"memw($Rs32+#$Ii) = clrbit(#$II)",
-V4LDST_tc_st_SLOT0, TypeV4LDST>, Enc_8773155 {
+tc_da79106e, TypeV4LDST>, Enc_84b2cd {
let Inst{6-5} = 0b10;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00111111010;
let addrMode = BaseImmOffset;
let accessSize = WordAccess;
-let mayStore = 1;
let mayLoad = 1;
+let mayStore = 1;
let isExtendable = 1;
let opExtendable = 1;
let isExtentSigned = 0;
@@ -10988,7 +11042,7 @@ def L4_iand_memopw_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, u5_0Imm:$II),
"memw($Rs32) = clrbit(#$II)",
-PSEUDO, TypeMAPPING> {
+tc_da79106e, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -10996,14 +11050,14 @@ def L4_ior_memopb_io : HInst<
(outs),
(ins IntRegs:$Rs32, u32_0Imm:$Ii, u5_0Imm:$II),
"memb($Rs32+#$Ii) = setbit(#$II)",
-V4LDST_tc_st_SLOT0, TypeV4LDST>, Enc_6773159 {
+tc_da79106e, TypeV4LDST>, Enc_46c951 {
let Inst{6-5} = 0b11;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00111111000;
let addrMode = BaseImmOffset;
let accessSize = ByteAccess;
-let mayStore = 1;
let mayLoad = 1;
+let mayStore = 1;
let isExtendable = 1;
let opExtendable = 1;
let isExtentSigned = 0;
@@ -11014,7 +11068,7 @@ def L4_ior_memopb_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, u5_0Imm:$II),
"memb($Rs32) = setbit(#$II)",
-PSEUDO, TypeMAPPING> {
+tc_da79106e, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -11022,14 +11076,14 @@ def L4_ior_memoph_io : HInst<
(outs),
(ins IntRegs:$Rs32, u31_1Imm:$Ii, u5_0Imm:$II),
"memh($Rs32+#$Ii) = setbit(#$II)",
-V4LDST_tc_st_SLOT0, TypeV4LDST>, Enc_9773167 {
+tc_da79106e, TypeV4LDST>, Enc_e66a97 {
let Inst{6-5} = 0b11;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00111111001;
let addrMode = BaseImmOffset;
let accessSize = HalfWordAccess;
-let mayStore = 1;
let mayLoad = 1;
+let mayStore = 1;
let isExtendable = 1;
let opExtendable = 1;
let isExtentSigned = 0;
@@ -11040,7 +11094,7 @@ def L4_ior_memoph_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, u5_0Imm:$II),
"memh($Rs32) = setbit(#$II)",
-PSEUDO, TypeMAPPING> {
+tc_da79106e, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -11048,14 +11102,14 @@ def L4_ior_memopw_io : HInst<
(outs),
(ins IntRegs:$Rs32, u30_2Imm:$Ii, u5_0Imm:$II),
"memw($Rs32+#$Ii) = setbit(#$II)",
-V4LDST_tc_st_SLOT0, TypeV4LDST>, Enc_8773155 {
+tc_da79106e, TypeV4LDST>, Enc_84b2cd {
let Inst{6-5} = 0b11;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00111111010;
let addrMode = BaseImmOffset;
let accessSize = WordAccess;
-let mayStore = 1;
let mayLoad = 1;
+let mayStore = 1;
let isExtendable = 1;
let opExtendable = 1;
let isExtentSigned = 0;
@@ -11066,7 +11120,7 @@ def L4_ior_memopw_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, u5_0Imm:$II),
"memw($Rs32) = setbit(#$II)",
-PSEUDO, TypeMAPPING> {
+tc_da79106e, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -11074,14 +11128,14 @@ def L4_isub_memopb_io : HInst<
(outs),
(ins IntRegs:$Rs32, u32_0Imm:$Ii, u5_0Imm:$II),
"memb($Rs32+#$Ii) -= #$II",
-V4LDST_tc_st_SLOT0, TypeV4LDST>, Enc_6773159 {
+tc_da79106e, TypeV4LDST>, Enc_46c951 {
let Inst{6-5} = 0b01;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00111111000;
let addrMode = BaseImmOffset;
let accessSize = ByteAccess;
-let mayStore = 1;
let mayLoad = 1;
+let mayStore = 1;
let isExtendable = 1;
let opExtendable = 1;
let isExtentSigned = 0;
@@ -11092,7 +11146,7 @@ def L4_isub_memopb_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, u5_0Imm:$II),
"memb($Rs32) -= #$II",
-PSEUDO, TypeMAPPING> {
+tc_da79106e, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -11100,14 +11154,14 @@ def L4_isub_memoph_io : HInst<
(outs),
(ins IntRegs:$Rs32, u31_1Imm:$Ii, u5_0Imm:$II),
"memh($Rs32+#$Ii) -= #$II",
-V4LDST_tc_st_SLOT0, TypeV4LDST>, Enc_9773167 {
+tc_da79106e, TypeV4LDST>, Enc_e66a97 {
let Inst{6-5} = 0b01;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00111111001;
let addrMode = BaseImmOffset;
let accessSize = HalfWordAccess;
-let mayStore = 1;
let mayLoad = 1;
+let mayStore = 1;
let isExtendable = 1;
let opExtendable = 1;
let isExtentSigned = 0;
@@ -11118,7 +11172,7 @@ def L4_isub_memoph_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, u5_0Imm:$II),
"memh($Rs32) -= #$II",
-PSEUDO, TypeMAPPING> {
+tc_da79106e, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -11126,14 +11180,14 @@ def L4_isub_memopw_io : HInst<
(outs),
(ins IntRegs:$Rs32, u30_2Imm:$Ii, u5_0Imm:$II),
"memw($Rs32+#$Ii) -= #$II",
-V4LDST_tc_st_SLOT0, TypeV4LDST>, Enc_8773155 {
+tc_da79106e, TypeV4LDST>, Enc_84b2cd {
let Inst{6-5} = 0b01;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00111111010;
let addrMode = BaseImmOffset;
let accessSize = WordAccess;
-let mayStore = 1;
let mayLoad = 1;
+let mayStore = 1;
let isExtendable = 1;
let opExtendable = 1;
let isExtentSigned = 0;
@@ -11144,7 +11198,7 @@ def L4_isub_memopw_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, u5_0Imm:$II),
"memw($Rs32) -= #$II",
-PSEUDO, TypeMAPPING> {
+tc_da79106e, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -11152,7 +11206,7 @@ def L4_loadalignb_ap : HInst<
(outs DoubleRegs:$Ryy32, IntRegs:$Re32),
(ins DoubleRegs:$Ryy32in, u32_0Imm:$II),
"$Ryy32 = memb_fifo($Re32=#$II)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_11047413 {
+tc_261d9b78, TypeLD>, Enc_f394d3 {
let Inst{7-7} = 0b0;
let Inst{13-12} = 0b01;
let Inst{31-21} = 0b10011010100;
@@ -11160,8 +11214,8 @@ let hasNewValue = 1;
let opNewValue = 1;
let addrMode = AbsoluteSet;
let accessSize = ByteAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let DecoderNamespace = "MustExtend";
let isExtendable = 1;
let opExtendable = 3;
@@ -11174,13 +11228,13 @@ def L4_loadalignb_ur : HInst<
(outs DoubleRegs:$Ryy32),
(ins DoubleRegs:$Ryy32in, IntRegs:$Rt32, u2_0Imm:$Ii, u32_0Imm:$II),
"$Ryy32 = memb_fifo($Rt32<<#$Ii+#$II)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_7303598 {
+tc_baccf077, TypeLD>, Enc_04c959 {
let Inst{12-12} = 0b1;
let Inst{31-21} = 0b10011100100;
let addrMode = BaseLongOffset;
let accessSize = ByteAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let InputType = "imm";
let DecoderNamespace = "MustExtend";
let isExtendable = 1;
@@ -11194,7 +11248,7 @@ def L4_loadalignh_ap : HInst<
(outs DoubleRegs:$Ryy32, IntRegs:$Re32),
(ins DoubleRegs:$Ryy32in, u32_0Imm:$II),
"$Ryy32 = memh_fifo($Re32=#$II)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_11047413 {
+tc_261d9b78, TypeLD>, Enc_f394d3 {
let Inst{7-7} = 0b0;
let Inst{13-12} = 0b01;
let Inst{31-21} = 0b10011010010;
@@ -11202,8 +11256,8 @@ let hasNewValue = 1;
let opNewValue = 1;
let addrMode = AbsoluteSet;
let accessSize = HalfWordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let DecoderNamespace = "MustExtend";
let isExtendable = 1;
let opExtendable = 3;
@@ -11216,13 +11270,13 @@ def L4_loadalignh_ur : HInst<
(outs DoubleRegs:$Ryy32),
(ins DoubleRegs:$Ryy32in, IntRegs:$Rt32, u2_0Imm:$Ii, u32_0Imm:$II),
"$Ryy32 = memh_fifo($Rt32<<#$Ii+#$II)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_7303598 {
+tc_baccf077, TypeLD>, Enc_04c959 {
let Inst{12-12} = 0b1;
let Inst{31-21} = 0b10011100010;
let addrMode = BaseLongOffset;
let accessSize = HalfWordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let InputType = "imm";
let DecoderNamespace = "MustExtend";
let isExtendable = 1;
@@ -11236,7 +11290,7 @@ def L4_loadbsw2_ap : HInst<
(outs IntRegs:$Rd32, IntRegs:$Re32),
(ins u32_0Imm:$II),
"$Rd32 = membh($Re32=#$II)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_12616482 {
+tc_b5f5a094, TypeLD>, Enc_323f2d {
let Inst{7-7} = 0b0;
let Inst{13-12} = 0b01;
let Inst{31-21} = 0b10011010001;
@@ -11246,8 +11300,8 @@ let hasNewValue2 = 1;
let opNewValue2 = 1;
let addrMode = AbsoluteSet;
let accessSize = HalfWordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let DecoderNamespace = "MustExtend";
let isExtendable = 1;
let opExtendable = 2;
@@ -11259,15 +11313,15 @@ def L4_loadbsw2_ur : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, u2_0Imm:$Ii, u32_0Imm:$II),
"$Rd32 = membh($Rt32<<#$Ii+#$II)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_486163 {
+tc_7d9a56cd, TypeLD>, Enc_4f677b {
let Inst{12-12} = 0b1;
let Inst{31-21} = 0b10011100001;
let hasNewValue = 1;
let opNewValue = 0;
let addrMode = BaseLongOffset;
let accessSize = HalfWordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let InputType = "imm";
let DecoderNamespace = "MustExtend";
let isExtendable = 1;
@@ -11280,7 +11334,7 @@ def L4_loadbsw4_ap : HInst<
(outs DoubleRegs:$Rdd32, IntRegs:$Re32),
(ins u32_0Imm:$II),
"$Rdd32 = membh($Re32=#$II)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_877823 {
+tc_b5f5a094, TypeLD>, Enc_7fa7f6 {
let Inst{7-7} = 0b0;
let Inst{13-12} = 0b01;
let Inst{31-21} = 0b10011010111;
@@ -11288,8 +11342,8 @@ let hasNewValue = 1;
let opNewValue = 1;
let addrMode = AbsoluteSet;
let accessSize = WordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let DecoderNamespace = "MustExtend";
let isExtendable = 1;
let opExtendable = 2;
@@ -11301,13 +11355,13 @@ def L4_loadbsw4_ur : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rt32, u2_0Imm:$Ii, u32_0Imm:$II),
"$Rdd32 = membh($Rt32<<#$Ii+#$II)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_5582416 {
+tc_7d9a56cd, TypeLD>, Enc_6185fe {
let Inst{12-12} = 0b1;
let Inst{31-21} = 0b10011100111;
let addrMode = BaseLongOffset;
let accessSize = WordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let InputType = "imm";
let DecoderNamespace = "MustExtend";
let isExtendable = 1;
@@ -11320,7 +11374,7 @@ def L4_loadbzw2_ap : HInst<
(outs IntRegs:$Rd32, IntRegs:$Re32),
(ins u32_0Imm:$II),
"$Rd32 = memubh($Re32=#$II)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_12616482 {
+tc_b5f5a094, TypeLD>, Enc_323f2d {
let Inst{7-7} = 0b0;
let Inst{13-12} = 0b01;
let Inst{31-21} = 0b10011010011;
@@ -11330,8 +11384,8 @@ let hasNewValue2 = 1;
let opNewValue2 = 1;
let addrMode = AbsoluteSet;
let accessSize = HalfWordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let DecoderNamespace = "MustExtend";
let isExtendable = 1;
let opExtendable = 2;
@@ -11343,15 +11397,15 @@ def L4_loadbzw2_ur : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, u2_0Imm:$Ii, u32_0Imm:$II),
"$Rd32 = memubh($Rt32<<#$Ii+#$II)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_486163 {
+tc_7d9a56cd, TypeLD>, Enc_4f677b {
let Inst{12-12} = 0b1;
let Inst{31-21} = 0b10011100011;
let hasNewValue = 1;
let opNewValue = 0;
let addrMode = BaseLongOffset;
let accessSize = HalfWordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let InputType = "imm";
let DecoderNamespace = "MustExtend";
let isExtendable = 1;
@@ -11364,7 +11418,7 @@ def L4_loadbzw4_ap : HInst<
(outs DoubleRegs:$Rdd32, IntRegs:$Re32),
(ins u32_0Imm:$II),
"$Rdd32 = memubh($Re32=#$II)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_877823 {
+tc_b5f5a094, TypeLD>, Enc_7fa7f6 {
let Inst{7-7} = 0b0;
let Inst{13-12} = 0b01;
let Inst{31-21} = 0b10011010101;
@@ -11372,8 +11426,8 @@ let hasNewValue = 1;
let opNewValue = 1;
let addrMode = AbsoluteSet;
let accessSize = WordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let DecoderNamespace = "MustExtend";
let isExtendable = 1;
let opExtendable = 2;
@@ -11385,13 +11439,13 @@ def L4_loadbzw4_ur : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rt32, u2_0Imm:$Ii, u32_0Imm:$II),
"$Rdd32 = memubh($Rt32<<#$Ii+#$II)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_5582416 {
+tc_7d9a56cd, TypeLD>, Enc_6185fe {
let Inst{12-12} = 0b1;
let Inst{31-21} = 0b10011100101;
let addrMode = BaseLongOffset;
let accessSize = WordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let InputType = "imm";
let DecoderNamespace = "MustExtend";
let isExtendable = 1;
@@ -11404,18 +11458,18 @@ def L4_loadd_locked : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32),
"$Rdd32 = memd_locked($Rs32)",
-LD_tc_ld_SLOT0, TypeLD>, Enc_4030179 {
+tc_29c14515, TypeLD>, Enc_3a3d62 {
let Inst{13-5} = 0b010000000;
let Inst{31-21} = 0b10010010000;
let accessSize = DoubleWordAccess;
-let isSoloAX = 1;
let mayLoad = 1;
+let isSoloAX = 1;
}
def L4_loadrb_ap : HInst<
(outs IntRegs:$Rd32, IntRegs:$Re32),
(ins u32_0Imm:$II),
"$Rd32 = memb($Re32=#$II)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_12616482 {
+tc_b5f5a094, TypeLD>, Enc_323f2d {
let Inst{7-7} = 0b0;
let Inst{13-12} = 0b01;
let Inst{31-21} = 0b10011011000;
@@ -11425,8 +11479,8 @@ let hasNewValue2 = 1;
let opNewValue2 = 1;
let addrMode = AbsoluteSet;
let accessSize = ByteAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let DecoderNamespace = "MustExtend";
let isExtendable = 1;
let opExtendable = 2;
@@ -11438,7 +11492,7 @@ def L4_loadrb_rr : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"$Rd32 = memb($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_10721363, AddrModeRel, ImmRegShl {
+tc_5625c6c1, TypeLD>, Enc_da664b, AddrModeRel, ImmRegShl {
let Inst{6-5} = 0b00;
let Inst{31-21} = 0b00111010000;
let hasNewValue = 1;
@@ -11455,15 +11509,15 @@ def L4_loadrb_ur : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, u2_0Imm:$Ii, u32_0Imm:$II),
"$Rd32 = memb($Rt32<<#$Ii+#$II)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_486163, AddrModeRel, ImmRegShl {
+tc_7d9a56cd, TypeLD>, Enc_4f677b, AddrModeRel, ImmRegShl {
let Inst{12-12} = 0b1;
let Inst{31-21} = 0b10011101000;
let hasNewValue = 1;
let opNewValue = 0;
let addrMode = BaseLongOffset;
let accessSize = ByteAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadrb";
let InputType = "imm";
let DecoderNamespace = "MustExtend";
@@ -11477,7 +11531,7 @@ def L4_loadrd_ap : HInst<
(outs DoubleRegs:$Rdd32, IntRegs:$Re32),
(ins u32_0Imm:$II),
"$Rdd32 = memd($Re32=#$II)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_877823 {
+tc_b5f5a094, TypeLD>, Enc_7fa7f6 {
let Inst{7-7} = 0b0;
let Inst{13-12} = 0b01;
let Inst{31-21} = 0b10011011110;
@@ -11485,8 +11539,8 @@ let hasNewValue = 1;
let opNewValue = 1;
let addrMode = AbsoluteSet;
let accessSize = DoubleWordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let DecoderNamespace = "MustExtend";
let isExtendable = 1;
let opExtendable = 2;
@@ -11498,7 +11552,7 @@ def L4_loadrd_rr : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"$Rdd32 = memd($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_7581852, AddrModeRel, ImmRegShl {
+tc_5625c6c1, TypeLD>, Enc_84bff1, AddrModeRel, ImmRegShl {
let Inst{6-5} = 0b00;
let Inst{31-21} = 0b00111010110;
let addrMode = BaseRegOffset;
@@ -11513,13 +11567,13 @@ def L4_loadrd_ur : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rt32, u2_0Imm:$Ii, u32_0Imm:$II),
"$Rdd32 = memd($Rt32<<#$Ii+#$II)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_5582416, AddrModeRel, ImmRegShl {
+tc_7d9a56cd, TypeLD>, Enc_6185fe, AddrModeRel, ImmRegShl {
let Inst{12-12} = 0b1;
let Inst{31-21} = 0b10011101110;
let addrMode = BaseLongOffset;
let accessSize = DoubleWordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadrd";
let InputType = "imm";
let DecoderNamespace = "MustExtend";
@@ -11533,7 +11587,7 @@ def L4_loadrh_ap : HInst<
(outs IntRegs:$Rd32, IntRegs:$Re32),
(ins u32_0Imm:$II),
"$Rd32 = memh($Re32=#$II)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_12616482 {
+tc_b5f5a094, TypeLD>, Enc_323f2d {
let Inst{7-7} = 0b0;
let Inst{13-12} = 0b01;
let Inst{31-21} = 0b10011011010;
@@ -11543,8 +11597,8 @@ let hasNewValue2 = 1;
let opNewValue2 = 1;
let addrMode = AbsoluteSet;
let accessSize = HalfWordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let DecoderNamespace = "MustExtend";
let isExtendable = 1;
let opExtendable = 2;
@@ -11556,7 +11610,7 @@ def L4_loadrh_rr : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"$Rd32 = memh($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_10721363, AddrModeRel, ImmRegShl {
+tc_5625c6c1, TypeLD>, Enc_da664b, AddrModeRel, ImmRegShl {
let Inst{6-5} = 0b00;
let Inst{31-21} = 0b00111010010;
let hasNewValue = 1;
@@ -11573,15 +11627,15 @@ def L4_loadrh_ur : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, u2_0Imm:$Ii, u32_0Imm:$II),
"$Rd32 = memh($Rt32<<#$Ii+#$II)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_486163, AddrModeRel, ImmRegShl {
+tc_7d9a56cd, TypeLD>, Enc_4f677b, AddrModeRel, ImmRegShl {
let Inst{12-12} = 0b1;
let Inst{31-21} = 0b10011101010;
let hasNewValue = 1;
let opNewValue = 0;
let addrMode = BaseLongOffset;
let accessSize = HalfWordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadrh";
let InputType = "imm";
let DecoderNamespace = "MustExtend";
@@ -11595,7 +11649,7 @@ def L4_loadri_ap : HInst<
(outs IntRegs:$Rd32, IntRegs:$Re32),
(ins u32_0Imm:$II),
"$Rd32 = memw($Re32=#$II)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_12616482 {
+tc_b5f5a094, TypeLD>, Enc_323f2d {
let Inst{7-7} = 0b0;
let Inst{13-12} = 0b01;
let Inst{31-21} = 0b10011011100;
@@ -11605,8 +11659,8 @@ let hasNewValue2 = 1;
let opNewValue2 = 1;
let addrMode = AbsoluteSet;
let accessSize = WordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let DecoderNamespace = "MustExtend";
let isExtendable = 1;
let opExtendable = 2;
@@ -11618,7 +11672,7 @@ def L4_loadri_rr : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"$Rd32 = memw($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_10721363, AddrModeRel, ImmRegShl {
+tc_5625c6c1, TypeLD>, Enc_da664b, AddrModeRel, ImmRegShl {
let Inst{6-5} = 0b00;
let Inst{31-21} = 0b00111010100;
let hasNewValue = 1;
@@ -11635,15 +11689,15 @@ def L4_loadri_ur : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, u2_0Imm:$Ii, u32_0Imm:$II),
"$Rd32 = memw($Rt32<<#$Ii+#$II)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_486163, AddrModeRel, ImmRegShl {
+tc_7d9a56cd, TypeLD>, Enc_4f677b, AddrModeRel, ImmRegShl {
let Inst{12-12} = 0b1;
let Inst{31-21} = 0b10011101100;
let hasNewValue = 1;
let opNewValue = 0;
let addrMode = BaseLongOffset;
let accessSize = WordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadri";
let InputType = "imm";
let DecoderNamespace = "MustExtend";
@@ -11657,7 +11711,7 @@ def L4_loadrub_ap : HInst<
(outs IntRegs:$Rd32, IntRegs:$Re32),
(ins u32_0Imm:$II),
"$Rd32 = memub($Re32=#$II)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_12616482 {
+tc_b5f5a094, TypeLD>, Enc_323f2d {
let Inst{7-7} = 0b0;
let Inst{13-12} = 0b01;
let Inst{31-21} = 0b10011011001;
@@ -11667,8 +11721,8 @@ let hasNewValue2 = 1;
let opNewValue2 = 1;
let addrMode = AbsoluteSet;
let accessSize = ByteAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let DecoderNamespace = "MustExtend";
let isExtendable = 1;
let opExtendable = 2;
@@ -11680,7 +11734,7 @@ def L4_loadrub_rr : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"$Rd32 = memub($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_10721363, AddrModeRel, ImmRegShl {
+tc_5625c6c1, TypeLD>, Enc_da664b, AddrModeRel, ImmRegShl {
let Inst{6-5} = 0b00;
let Inst{31-21} = 0b00111010001;
let hasNewValue = 1;
@@ -11697,15 +11751,15 @@ def L4_loadrub_ur : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, u2_0Imm:$Ii, u32_0Imm:$II),
"$Rd32 = memub($Rt32<<#$Ii+#$II)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_486163, AddrModeRel, ImmRegShl {
+tc_7d9a56cd, TypeLD>, Enc_4f677b, AddrModeRel, ImmRegShl {
let Inst{12-12} = 0b1;
let Inst{31-21} = 0b10011101001;
let hasNewValue = 1;
let opNewValue = 0;
let addrMode = BaseLongOffset;
let accessSize = ByteAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadrub";
let InputType = "imm";
let DecoderNamespace = "MustExtend";
@@ -11719,7 +11773,7 @@ def L4_loadruh_ap : HInst<
(outs IntRegs:$Rd32, IntRegs:$Re32),
(ins u32_0Imm:$II),
"$Rd32 = memuh($Re32=#$II)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_12616482 {
+tc_b5f5a094, TypeLD>, Enc_323f2d {
let Inst{7-7} = 0b0;
let Inst{13-12} = 0b01;
let Inst{31-21} = 0b10011011011;
@@ -11729,8 +11783,8 @@ let hasNewValue2 = 1;
let opNewValue2 = 1;
let addrMode = AbsoluteSet;
let accessSize = HalfWordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let DecoderNamespace = "MustExtend";
let isExtendable = 1;
let opExtendable = 2;
@@ -11742,7 +11796,7 @@ def L4_loadruh_rr : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"$Rd32 = memuh($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_10721363, AddrModeRel, ImmRegShl {
+tc_5625c6c1, TypeLD>, Enc_da664b, AddrModeRel, ImmRegShl {
let Inst{6-5} = 0b00;
let Inst{31-21} = 0b00111010011;
let hasNewValue = 1;
@@ -11759,15 +11813,15 @@ def L4_loadruh_ur : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, u2_0Imm:$Ii, u32_0Imm:$II),
"$Rd32 = memuh($Rt32<<#$Ii+#$II)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_486163, AddrModeRel, ImmRegShl {
+tc_7d9a56cd, TypeLD>, Enc_4f677b, AddrModeRel, ImmRegShl {
let Inst{12-12} = 0b1;
let Inst{31-21} = 0b10011101011;
let hasNewValue = 1;
let opNewValue = 0;
let addrMode = BaseLongOffset;
let accessSize = HalfWordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadruh";
let InputType = "imm";
let DecoderNamespace = "MustExtend";
@@ -11781,14 +11835,14 @@ def L4_or_memopb_io : HInst<
(outs),
(ins IntRegs:$Rs32, u32_0Imm:$Ii, IntRegs:$Rt32),
"memb($Rs32+#$Ii) |= $Rt32",
-V4LDST_tc_st_SLOT0, TypeV4LDST>, Enc_11849200 {
+tc_a9c993d9, TypeV4LDST>, Enc_d44e31 {
let Inst{6-5} = 0b11;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00111110000;
let addrMode = BaseImmOffset;
let accessSize = ByteAccess;
-let mayStore = 1;
let mayLoad = 1;
+let mayStore = 1;
let isExtendable = 1;
let opExtendable = 1;
let isExtentSigned = 0;
@@ -11799,7 +11853,7 @@ def L4_or_memopb_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"memb($Rs32) |= $Rt32",
-PSEUDO, TypeMAPPING> {
+tc_a9c993d9, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -11807,14 +11861,14 @@ def L4_or_memoph_io : HInst<
(outs),
(ins IntRegs:$Rs32, u31_1Imm:$Ii, IntRegs:$Rt32),
"memh($Rs32+#$Ii) |= $Rt32",
-V4LDST_tc_st_SLOT0, TypeV4LDST>, Enc_8849208 {
+tc_a9c993d9, TypeV4LDST>, Enc_163a3c {
let Inst{6-5} = 0b11;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00111110001;
let addrMode = BaseImmOffset;
let accessSize = HalfWordAccess;
-let mayStore = 1;
let mayLoad = 1;
+let mayStore = 1;
let isExtendable = 1;
let opExtendable = 1;
let isExtentSigned = 0;
@@ -11825,7 +11879,7 @@ def L4_or_memoph_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"memh($Rs32) |= $Rt32",
-PSEUDO, TypeMAPPING> {
+tc_a9c993d9, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -11833,14 +11887,14 @@ def L4_or_memopw_io : HInst<
(outs),
(ins IntRegs:$Rs32, u30_2Imm:$Ii, IntRegs:$Rt32),
"memw($Rs32+#$Ii) |= $Rt32",
-V4LDST_tc_st_SLOT0, TypeV4LDST>, Enc_9849208 {
+tc_a9c993d9, TypeV4LDST>, Enc_226535 {
let Inst{6-5} = 0b11;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00111110010;
let addrMode = BaseImmOffset;
let accessSize = WordAccess;
-let mayStore = 1;
let mayLoad = 1;
+let mayStore = 1;
let isExtendable = 1;
let opExtendable = 1;
let isExtentSigned = 0;
@@ -11851,7 +11905,7 @@ def L4_or_memopw_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"memw($Rs32) |= $Rt32",
-PSEUDO, TypeMAPPING> {
+tc_a9c993d9, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -11859,7 +11913,7 @@ def L4_ploadrbf_abs : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, u32_0Imm:$Ii),
"if (!$Pt4) $Rd32 = memb(#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_13344657, AddrModeRel {
+tc_136c4786, TypeLD>, Enc_2301d6, AddrModeRel {
let Inst{7-5} = 0b100;
let Inst{13-11} = 0b101;
let Inst{31-21} = 0b10011111000;
@@ -11869,8 +11923,8 @@ let hasNewValue = 1;
let opNewValue = 0;
let addrMode = Absolute;
let accessSize = ByteAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadrb";
let BaseOpcode = "L4_loadrb_abs";
let DecoderNamespace = "MustExtend";
@@ -11884,7 +11938,7 @@ def L4_ploadrbf_rr : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"if (!$Pv4) $Rd32 = memb($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_1793896, AddrModeRel {
+tc_9dafb7d3, TypeLD>, Enc_2e1979, AddrModeRel {
let Inst{31-21} = 0b00110001000;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -11901,7 +11955,7 @@ def L4_ploadrbfnew_abs : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, u32_0Imm:$Ii),
"if (!$Pt4.new) $Rd32 = memb(#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_13344657, AddrModeRel {
+tc_b5f5a094, TypeLD>, Enc_2301d6, AddrModeRel {
let Inst{7-5} = 0b100;
let Inst{13-11} = 0b111;
let Inst{31-21} = 0b10011111000;
@@ -11911,9 +11965,9 @@ let hasNewValue = 1;
let opNewValue = 0;
let addrMode = Absolute;
let accessSize = ByteAccess;
-let isExtended = 1;
let isPredicatedNew = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadrb";
let BaseOpcode = "L4_loadrb_abs";
let DecoderNamespace = "MustExtend";
@@ -11927,7 +11981,7 @@ def L4_ploadrbfnew_rr : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"if (!$Pv4.new) $Rd32 = memb($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_1793896, AddrModeRel {
+tc_128719e8, TypeLD>, Enc_2e1979, AddrModeRel {
let Inst{31-21} = 0b00110011000;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -11945,7 +11999,7 @@ def L4_ploadrbt_abs : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, u32_0Imm:$Ii),
"if ($Pt4) $Rd32 = memb(#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_13344657, AddrModeRel {
+tc_136c4786, TypeLD>, Enc_2301d6, AddrModeRel {
let Inst{7-5} = 0b100;
let Inst{13-11} = 0b100;
let Inst{31-21} = 0b10011111000;
@@ -11954,8 +12008,8 @@ let hasNewValue = 1;
let opNewValue = 0;
let addrMode = Absolute;
let accessSize = ByteAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadrb";
let BaseOpcode = "L4_loadrb_abs";
let DecoderNamespace = "MustExtend";
@@ -11969,7 +12023,7 @@ def L4_ploadrbt_rr : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"if ($Pv4) $Rd32 = memb($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_1793896, AddrModeRel {
+tc_9dafb7d3, TypeLD>, Enc_2e1979, AddrModeRel {
let Inst{31-21} = 0b00110000000;
let isPredicated = 1;
let hasNewValue = 1;
@@ -11985,7 +12039,7 @@ def L4_ploadrbtnew_abs : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, u32_0Imm:$Ii),
"if ($Pt4.new) $Rd32 = memb(#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_13344657, AddrModeRel {
+tc_b5f5a094, TypeLD>, Enc_2301d6, AddrModeRel {
let Inst{7-5} = 0b100;
let Inst{13-11} = 0b110;
let Inst{31-21} = 0b10011111000;
@@ -11994,9 +12048,9 @@ let hasNewValue = 1;
let opNewValue = 0;
let addrMode = Absolute;
let accessSize = ByteAccess;
-let isExtended = 1;
let isPredicatedNew = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadrb";
let BaseOpcode = "L4_loadrb_abs";
let DecoderNamespace = "MustExtend";
@@ -12010,7 +12064,7 @@ def L4_ploadrbtnew_rr : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"if ($Pv4.new) $Rd32 = memb($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_1793896, AddrModeRel {
+tc_128719e8, TypeLD>, Enc_2e1979, AddrModeRel {
let Inst{31-21} = 0b00110010000;
let isPredicated = 1;
let hasNewValue = 1;
@@ -12027,7 +12081,7 @@ def L4_ploadrdf_abs : HInst<
(outs DoubleRegs:$Rdd32),
(ins PredRegs:$Pt4, u32_0Imm:$Ii),
"if (!$Pt4) $Rdd32 = memd(#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_15182416, AddrModeRel {
+tc_136c4786, TypeLD>, Enc_2a7b91, AddrModeRel {
let Inst{7-5} = 0b100;
let Inst{13-11} = 0b101;
let Inst{31-21} = 0b10011111110;
@@ -12035,8 +12089,8 @@ let isPredicated = 1;
let isPredicatedFalse = 1;
let addrMode = Absolute;
let accessSize = DoubleWordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadrd";
let BaseOpcode = "L4_loadrd_abs";
let DecoderNamespace = "MustExtend";
@@ -12050,7 +12104,7 @@ def L4_ploadrdf_rr : HInst<
(outs DoubleRegs:$Rdd32),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"if (!$Pv4) $Rdd32 = memd($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_7254313, AddrModeRel {
+tc_9dafb7d3, TypeLD>, Enc_98c0b8, AddrModeRel {
let Inst{31-21} = 0b00110001110;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -12065,7 +12119,7 @@ def L4_ploadrdfnew_abs : HInst<
(outs DoubleRegs:$Rdd32),
(ins PredRegs:$Pt4, u32_0Imm:$Ii),
"if (!$Pt4.new) $Rdd32 = memd(#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_15182416, AddrModeRel {
+tc_b5f5a094, TypeLD>, Enc_2a7b91, AddrModeRel {
let Inst{7-5} = 0b100;
let Inst{13-11} = 0b111;
let Inst{31-21} = 0b10011111110;
@@ -12073,9 +12127,9 @@ let isPredicated = 1;
let isPredicatedFalse = 1;
let addrMode = Absolute;
let accessSize = DoubleWordAccess;
-let isExtended = 1;
let isPredicatedNew = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadrd";
let BaseOpcode = "L4_loadrd_abs";
let DecoderNamespace = "MustExtend";
@@ -12089,7 +12143,7 @@ def L4_ploadrdfnew_rr : HInst<
(outs DoubleRegs:$Rdd32),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"if (!$Pv4.new) $Rdd32 = memd($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_7254313, AddrModeRel {
+tc_128719e8, TypeLD>, Enc_98c0b8, AddrModeRel {
let Inst{31-21} = 0b00110011110;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -12105,15 +12159,15 @@ def L4_ploadrdt_abs : HInst<
(outs DoubleRegs:$Rdd32),
(ins PredRegs:$Pt4, u32_0Imm:$Ii),
"if ($Pt4) $Rdd32 = memd(#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_15182416, AddrModeRel {
+tc_136c4786, TypeLD>, Enc_2a7b91, AddrModeRel {
let Inst{7-5} = 0b100;
let Inst{13-11} = 0b100;
let Inst{31-21} = 0b10011111110;
let isPredicated = 1;
let addrMode = Absolute;
let accessSize = DoubleWordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadrd";
let BaseOpcode = "L4_loadrd_abs";
let DecoderNamespace = "MustExtend";
@@ -12127,7 +12181,7 @@ def L4_ploadrdt_rr : HInst<
(outs DoubleRegs:$Rdd32),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"if ($Pv4) $Rdd32 = memd($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_7254313, AddrModeRel {
+tc_9dafb7d3, TypeLD>, Enc_98c0b8, AddrModeRel {
let Inst{31-21} = 0b00110000110;
let isPredicated = 1;
let addrMode = BaseRegOffset;
@@ -12141,16 +12195,16 @@ def L4_ploadrdtnew_abs : HInst<
(outs DoubleRegs:$Rdd32),
(ins PredRegs:$Pt4, u32_0Imm:$Ii),
"if ($Pt4.new) $Rdd32 = memd(#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_15182416, AddrModeRel {
+tc_b5f5a094, TypeLD>, Enc_2a7b91, AddrModeRel {
let Inst{7-5} = 0b100;
let Inst{13-11} = 0b110;
let Inst{31-21} = 0b10011111110;
let isPredicated = 1;
let addrMode = Absolute;
let accessSize = DoubleWordAccess;
-let isExtended = 1;
let isPredicatedNew = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadrd";
let BaseOpcode = "L4_loadrd_abs";
let DecoderNamespace = "MustExtend";
@@ -12164,7 +12218,7 @@ def L4_ploadrdtnew_rr : HInst<
(outs DoubleRegs:$Rdd32),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"if ($Pv4.new) $Rdd32 = memd($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_7254313, AddrModeRel {
+tc_128719e8, TypeLD>, Enc_98c0b8, AddrModeRel {
let Inst{31-21} = 0b00110010110;
let isPredicated = 1;
let addrMode = BaseRegOffset;
@@ -12179,7 +12233,7 @@ def L4_ploadrhf_abs : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, u32_0Imm:$Ii),
"if (!$Pt4) $Rd32 = memh(#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_13344657, AddrModeRel {
+tc_136c4786, TypeLD>, Enc_2301d6, AddrModeRel {
let Inst{7-5} = 0b100;
let Inst{13-11} = 0b101;
let Inst{31-21} = 0b10011111010;
@@ -12189,8 +12243,8 @@ let hasNewValue = 1;
let opNewValue = 0;
let addrMode = Absolute;
let accessSize = HalfWordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadrh";
let BaseOpcode = "L4_loadrh_abs";
let DecoderNamespace = "MustExtend";
@@ -12204,7 +12258,7 @@ def L4_ploadrhf_rr : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"if (!$Pv4) $Rd32 = memh($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_1793896, AddrModeRel {
+tc_9dafb7d3, TypeLD>, Enc_2e1979, AddrModeRel {
let Inst{31-21} = 0b00110001010;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -12221,7 +12275,7 @@ def L4_ploadrhfnew_abs : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, u32_0Imm:$Ii),
"if (!$Pt4.new) $Rd32 = memh(#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_13344657, AddrModeRel {
+tc_b5f5a094, TypeLD>, Enc_2301d6, AddrModeRel {
let Inst{7-5} = 0b100;
let Inst{13-11} = 0b111;
let Inst{31-21} = 0b10011111010;
@@ -12231,9 +12285,9 @@ let hasNewValue = 1;
let opNewValue = 0;
let addrMode = Absolute;
let accessSize = HalfWordAccess;
-let isExtended = 1;
let isPredicatedNew = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadrh";
let BaseOpcode = "L4_loadrh_abs";
let DecoderNamespace = "MustExtend";
@@ -12247,7 +12301,7 @@ def L4_ploadrhfnew_rr : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"if (!$Pv4.new) $Rd32 = memh($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_1793896, AddrModeRel {
+tc_128719e8, TypeLD>, Enc_2e1979, AddrModeRel {
let Inst{31-21} = 0b00110011010;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -12265,7 +12319,7 @@ def L4_ploadrht_abs : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, u32_0Imm:$Ii),
"if ($Pt4) $Rd32 = memh(#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_13344657, AddrModeRel {
+tc_136c4786, TypeLD>, Enc_2301d6, AddrModeRel {
let Inst{7-5} = 0b100;
let Inst{13-11} = 0b100;
let Inst{31-21} = 0b10011111010;
@@ -12274,8 +12328,8 @@ let hasNewValue = 1;
let opNewValue = 0;
let addrMode = Absolute;
let accessSize = HalfWordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadrh";
let BaseOpcode = "L4_loadrh_abs";
let DecoderNamespace = "MustExtend";
@@ -12289,7 +12343,7 @@ def L4_ploadrht_rr : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"if ($Pv4) $Rd32 = memh($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_1793896, AddrModeRel {
+tc_9dafb7d3, TypeLD>, Enc_2e1979, AddrModeRel {
let Inst{31-21} = 0b00110000010;
let isPredicated = 1;
let hasNewValue = 1;
@@ -12305,7 +12359,7 @@ def L4_ploadrhtnew_abs : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, u32_0Imm:$Ii),
"if ($Pt4.new) $Rd32 = memh(#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_13344657, AddrModeRel {
+tc_b5f5a094, TypeLD>, Enc_2301d6, AddrModeRel {
let Inst{7-5} = 0b100;
let Inst{13-11} = 0b110;
let Inst{31-21} = 0b10011111010;
@@ -12314,9 +12368,9 @@ let hasNewValue = 1;
let opNewValue = 0;
let addrMode = Absolute;
let accessSize = HalfWordAccess;
-let isExtended = 1;
let isPredicatedNew = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadrh";
let BaseOpcode = "L4_loadrh_abs";
let DecoderNamespace = "MustExtend";
@@ -12330,7 +12384,7 @@ def L4_ploadrhtnew_rr : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"if ($Pv4.new) $Rd32 = memh($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_1793896, AddrModeRel {
+tc_128719e8, TypeLD>, Enc_2e1979, AddrModeRel {
let Inst{31-21} = 0b00110010010;
let isPredicated = 1;
let hasNewValue = 1;
@@ -12347,7 +12401,7 @@ def L4_ploadrif_abs : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, u32_0Imm:$Ii),
"if (!$Pt4) $Rd32 = memw(#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_13344657, AddrModeRel {
+tc_136c4786, TypeLD>, Enc_2301d6, AddrModeRel {
let Inst{7-5} = 0b100;
let Inst{13-11} = 0b101;
let Inst{31-21} = 0b10011111100;
@@ -12357,8 +12411,8 @@ let hasNewValue = 1;
let opNewValue = 0;
let addrMode = Absolute;
let accessSize = WordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadri";
let BaseOpcode = "L4_loadri_abs";
let DecoderNamespace = "MustExtend";
@@ -12372,7 +12426,7 @@ def L4_ploadrif_rr : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"if (!$Pv4) $Rd32 = memw($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_1793896, AddrModeRel {
+tc_9dafb7d3, TypeLD>, Enc_2e1979, AddrModeRel {
let Inst{31-21} = 0b00110001100;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -12389,7 +12443,7 @@ def L4_ploadrifnew_abs : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, u32_0Imm:$Ii),
"if (!$Pt4.new) $Rd32 = memw(#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_13344657, AddrModeRel {
+tc_b5f5a094, TypeLD>, Enc_2301d6, AddrModeRel {
let Inst{7-5} = 0b100;
let Inst{13-11} = 0b111;
let Inst{31-21} = 0b10011111100;
@@ -12399,9 +12453,9 @@ let hasNewValue = 1;
let opNewValue = 0;
let addrMode = Absolute;
let accessSize = WordAccess;
-let isExtended = 1;
let isPredicatedNew = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadri";
let BaseOpcode = "L4_loadri_abs";
let DecoderNamespace = "MustExtend";
@@ -12415,7 +12469,7 @@ def L4_ploadrifnew_rr : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"if (!$Pv4.new) $Rd32 = memw($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_1793896, AddrModeRel {
+tc_128719e8, TypeLD>, Enc_2e1979, AddrModeRel {
let Inst{31-21} = 0b00110011100;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -12433,7 +12487,7 @@ def L4_ploadrit_abs : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, u32_0Imm:$Ii),
"if ($Pt4) $Rd32 = memw(#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_13344657, AddrModeRel {
+tc_136c4786, TypeLD>, Enc_2301d6, AddrModeRel {
let Inst{7-5} = 0b100;
let Inst{13-11} = 0b100;
let Inst{31-21} = 0b10011111100;
@@ -12442,8 +12496,8 @@ let hasNewValue = 1;
let opNewValue = 0;
let addrMode = Absolute;
let accessSize = WordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadri";
let BaseOpcode = "L4_loadri_abs";
let DecoderNamespace = "MustExtend";
@@ -12457,7 +12511,7 @@ def L4_ploadrit_rr : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"if ($Pv4) $Rd32 = memw($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_1793896, AddrModeRel {
+tc_9dafb7d3, TypeLD>, Enc_2e1979, AddrModeRel {
let Inst{31-21} = 0b00110000100;
let isPredicated = 1;
let hasNewValue = 1;
@@ -12473,7 +12527,7 @@ def L4_ploadritnew_abs : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, u32_0Imm:$Ii),
"if ($Pt4.new) $Rd32 = memw(#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_13344657, AddrModeRel {
+tc_b5f5a094, TypeLD>, Enc_2301d6, AddrModeRel {
let Inst{7-5} = 0b100;
let Inst{13-11} = 0b110;
let Inst{31-21} = 0b10011111100;
@@ -12482,9 +12536,9 @@ let hasNewValue = 1;
let opNewValue = 0;
let addrMode = Absolute;
let accessSize = WordAccess;
-let isExtended = 1;
let isPredicatedNew = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadri";
let BaseOpcode = "L4_loadri_abs";
let DecoderNamespace = "MustExtend";
@@ -12498,7 +12552,7 @@ def L4_ploadritnew_rr : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"if ($Pv4.new) $Rd32 = memw($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_1793896, AddrModeRel {
+tc_128719e8, TypeLD>, Enc_2e1979, AddrModeRel {
let Inst{31-21} = 0b00110010100;
let isPredicated = 1;
let hasNewValue = 1;
@@ -12515,7 +12569,7 @@ def L4_ploadrubf_abs : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, u32_0Imm:$Ii),
"if (!$Pt4) $Rd32 = memub(#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_13344657, AddrModeRel {
+tc_136c4786, TypeLD>, Enc_2301d6, AddrModeRel {
let Inst{7-5} = 0b100;
let Inst{13-11} = 0b101;
let Inst{31-21} = 0b10011111001;
@@ -12525,8 +12579,8 @@ let hasNewValue = 1;
let opNewValue = 0;
let addrMode = Absolute;
let accessSize = ByteAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadrub";
let BaseOpcode = "L4_loadrub_abs";
let DecoderNamespace = "MustExtend";
@@ -12540,7 +12594,7 @@ def L4_ploadrubf_rr : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"if (!$Pv4) $Rd32 = memub($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_1793896, AddrModeRel {
+tc_9dafb7d3, TypeLD>, Enc_2e1979, AddrModeRel {
let Inst{31-21} = 0b00110001001;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -12557,7 +12611,7 @@ def L4_ploadrubfnew_abs : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, u32_0Imm:$Ii),
"if (!$Pt4.new) $Rd32 = memub(#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_13344657, AddrModeRel {
+tc_b5f5a094, TypeLD>, Enc_2301d6, AddrModeRel {
let Inst{7-5} = 0b100;
let Inst{13-11} = 0b111;
let Inst{31-21} = 0b10011111001;
@@ -12567,9 +12621,9 @@ let hasNewValue = 1;
let opNewValue = 0;
let addrMode = Absolute;
let accessSize = ByteAccess;
-let isExtended = 1;
let isPredicatedNew = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadrub";
let BaseOpcode = "L4_loadrub_abs";
let DecoderNamespace = "MustExtend";
@@ -12583,7 +12637,7 @@ def L4_ploadrubfnew_rr : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"if (!$Pv4.new) $Rd32 = memub($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_1793896, AddrModeRel {
+tc_128719e8, TypeLD>, Enc_2e1979, AddrModeRel {
let Inst{31-21} = 0b00110011001;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -12601,7 +12655,7 @@ def L4_ploadrubt_abs : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, u32_0Imm:$Ii),
"if ($Pt4) $Rd32 = memub(#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_13344657, AddrModeRel {
+tc_136c4786, TypeLD>, Enc_2301d6, AddrModeRel {
let Inst{7-5} = 0b100;
let Inst{13-11} = 0b100;
let Inst{31-21} = 0b10011111001;
@@ -12610,8 +12664,8 @@ let hasNewValue = 1;
let opNewValue = 0;
let addrMode = Absolute;
let accessSize = ByteAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadrub";
let BaseOpcode = "L4_loadrub_abs";
let DecoderNamespace = "MustExtend";
@@ -12625,7 +12679,7 @@ def L4_ploadrubt_rr : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"if ($Pv4) $Rd32 = memub($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_1793896, AddrModeRel {
+tc_9dafb7d3, TypeLD>, Enc_2e1979, AddrModeRel {
let Inst{31-21} = 0b00110000001;
let isPredicated = 1;
let hasNewValue = 1;
@@ -12641,7 +12695,7 @@ def L4_ploadrubtnew_abs : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, u32_0Imm:$Ii),
"if ($Pt4.new) $Rd32 = memub(#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_13344657, AddrModeRel {
+tc_b5f5a094, TypeLD>, Enc_2301d6, AddrModeRel {
let Inst{7-5} = 0b100;
let Inst{13-11} = 0b110;
let Inst{31-21} = 0b10011111001;
@@ -12650,9 +12704,9 @@ let hasNewValue = 1;
let opNewValue = 0;
let addrMode = Absolute;
let accessSize = ByteAccess;
-let isExtended = 1;
let isPredicatedNew = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadrub";
let BaseOpcode = "L4_loadrub_abs";
let DecoderNamespace = "MustExtend";
@@ -12666,7 +12720,7 @@ def L4_ploadrubtnew_rr : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"if ($Pv4.new) $Rd32 = memub($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_1793896, AddrModeRel {
+tc_128719e8, TypeLD>, Enc_2e1979, AddrModeRel {
let Inst{31-21} = 0b00110010001;
let isPredicated = 1;
let hasNewValue = 1;
@@ -12683,7 +12737,7 @@ def L4_ploadruhf_abs : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, u32_0Imm:$Ii),
"if (!$Pt4) $Rd32 = memuh(#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_13344657, AddrModeRel {
+tc_136c4786, TypeLD>, Enc_2301d6, AddrModeRel {
let Inst{7-5} = 0b100;
let Inst{13-11} = 0b101;
let Inst{31-21} = 0b10011111011;
@@ -12693,8 +12747,8 @@ let hasNewValue = 1;
let opNewValue = 0;
let addrMode = Absolute;
let accessSize = HalfWordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadruh";
let BaseOpcode = "L4_loadruh_abs";
let DecoderNamespace = "MustExtend";
@@ -12708,7 +12762,7 @@ def L4_ploadruhf_rr : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"if (!$Pv4) $Rd32 = memuh($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_1793896, AddrModeRel {
+tc_9dafb7d3, TypeLD>, Enc_2e1979, AddrModeRel {
let Inst{31-21} = 0b00110001011;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -12725,7 +12779,7 @@ def L4_ploadruhfnew_abs : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, u32_0Imm:$Ii),
"if (!$Pt4.new) $Rd32 = memuh(#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_13344657, AddrModeRel {
+tc_b5f5a094, TypeLD>, Enc_2301d6, AddrModeRel {
let Inst{7-5} = 0b100;
let Inst{13-11} = 0b111;
let Inst{31-21} = 0b10011111011;
@@ -12735,9 +12789,9 @@ let hasNewValue = 1;
let opNewValue = 0;
let addrMode = Absolute;
let accessSize = HalfWordAccess;
-let isExtended = 1;
let isPredicatedNew = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadruh";
let BaseOpcode = "L4_loadruh_abs";
let DecoderNamespace = "MustExtend";
@@ -12751,7 +12805,7 @@ def L4_ploadruhfnew_rr : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"if (!$Pv4.new) $Rd32 = memuh($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_1793896, AddrModeRel {
+tc_128719e8, TypeLD>, Enc_2e1979, AddrModeRel {
let Inst{31-21} = 0b00110011011;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -12769,7 +12823,7 @@ def L4_ploadruht_abs : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, u32_0Imm:$Ii),
"if ($Pt4) $Rd32 = memuh(#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_13344657, AddrModeRel {
+tc_136c4786, TypeLD>, Enc_2301d6, AddrModeRel {
let Inst{7-5} = 0b100;
let Inst{13-11} = 0b100;
let Inst{31-21} = 0b10011111011;
@@ -12778,8 +12832,8 @@ let hasNewValue = 1;
let opNewValue = 0;
let addrMode = Absolute;
let accessSize = HalfWordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadruh";
let BaseOpcode = "L4_loadruh_abs";
let DecoderNamespace = "MustExtend";
@@ -12793,7 +12847,7 @@ def L4_ploadruht_rr : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"if ($Pv4) $Rd32 = memuh($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_1793896, AddrModeRel {
+tc_9dafb7d3, TypeLD>, Enc_2e1979, AddrModeRel {
let Inst{31-21} = 0b00110000011;
let isPredicated = 1;
let hasNewValue = 1;
@@ -12809,7 +12863,7 @@ def L4_ploadruhtnew_abs : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, u32_0Imm:$Ii),
"if ($Pt4.new) $Rd32 = memuh(#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_13344657, AddrModeRel {
+tc_b5f5a094, TypeLD>, Enc_2301d6, AddrModeRel {
let Inst{7-5} = 0b100;
let Inst{13-11} = 0b110;
let Inst{31-21} = 0b10011111011;
@@ -12818,9 +12872,9 @@ let hasNewValue = 1;
let opNewValue = 0;
let addrMode = Absolute;
let accessSize = HalfWordAccess;
-let isExtended = 1;
let isPredicatedNew = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadruh";
let BaseOpcode = "L4_loadruh_abs";
let DecoderNamespace = "MustExtend";
@@ -12834,7 +12888,7 @@ def L4_ploadruhtnew_rr : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"if ($Pv4.new) $Rd32 = memuh($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_1793896, AddrModeRel {
+tc_128719e8, TypeLD>, Enc_2e1979, AddrModeRel {
let Inst{31-21} = 0b00110010011;
let isPredicated = 1;
let hasNewValue = 1;
@@ -12851,7 +12905,7 @@ def L4_return : HInst<
(outs),
(ins),
"dealloc_return",
-LD_tc_3or4stall_SLOT0, TypeLD>, Enc_0, PredNewRel {
+tc_dcfee7ae, TypeLD>, Enc_3a3d62, PredNewRel {
let Inst{4-0} = 0b11110;
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b10010110000;
@@ -12873,7 +12927,7 @@ def L4_return_f : HInst<
(outs),
(ins PredRegs:$Pv4),
"if (!$Pv4) dealloc_return",
-LD_tc_3or4stall_SLOT0, TypeLD>, Enc_12711252, PredNewRel {
+tc_9ce7a5ab, TypeLD>, Enc_b7fad3, PredNewRel {
let Inst{4-0} = 0b11110;
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b1100;
@@ -12885,8 +12939,8 @@ let isTerminator = 1;
let isIndirectBranch = 1;
let accessSize = DoubleWordAccess;
let cofMax1 = 1;
-let isReturn = 1;
let mayLoad = 1;
+let isReturn = 1;
let Uses = [R30];
let Defs = [PC, R29, R30, R31];
let BaseOpcode = "L4_return";
@@ -12896,7 +12950,7 @@ def L4_return_fnew_pnt : HInst<
(outs),
(ins PredRegs:$Pv4),
"if (!$Pv4.new) dealloc_return:nt",
-LD_tc_3or4stall_SLOT0, TypeLD>, Enc_12711252, PredNewRel {
+tc_3993c58b, TypeLD>, Enc_b7fad3, PredNewRel {
let Inst{4-0} = 0b11110;
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b1010;
@@ -12908,9 +12962,9 @@ let isTerminator = 1;
let isIndirectBranch = 1;
let accessSize = DoubleWordAccess;
let cofMax1 = 1;
-let isReturn = 1;
let isPredicatedNew = 1;
let mayLoad = 1;
+let isReturn = 1;
let Uses = [R30];
let Defs = [PC, R29, R30, R31];
let BaseOpcode = "L4_return";
@@ -12920,7 +12974,7 @@ def L4_return_fnew_pt : HInst<
(outs),
(ins PredRegs:$Pv4),
"if (!$Pv4.new) dealloc_return:t",
-LD_tc_3or4stall_SLOT0, TypeLD>, Enc_12711252, PredNewRel {
+tc_3993c58b, TypeLD>, Enc_b7fad3, PredNewRel {
let Inst{4-0} = 0b11110;
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b1110;
@@ -12932,9 +12986,9 @@ let isTerminator = 1;
let isIndirectBranch = 1;
let accessSize = DoubleWordAccess;
let cofMax1 = 1;
-let isReturn = 1;
let isPredicatedNew = 1;
let mayLoad = 1;
+let isReturn = 1;
let Uses = [R30];
let Defs = [PC, R29, R30, R31];
let BaseOpcode = "L4_return";
@@ -12944,7 +12998,7 @@ def L4_return_t : HInst<
(outs),
(ins PredRegs:$Pv4),
"if ($Pv4) dealloc_return",
-LD_tc_3or4stall_SLOT0, TypeLD>, Enc_12711252, PredNewRel {
+tc_9ce7a5ab, TypeLD>, Enc_b7fad3, PredNewRel {
let Inst{4-0} = 0b11110;
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b0100;
@@ -12955,8 +13009,8 @@ let isTerminator = 1;
let isIndirectBranch = 1;
let accessSize = DoubleWordAccess;
let cofMax1 = 1;
-let isReturn = 1;
let mayLoad = 1;
+let isReturn = 1;
let Uses = [R30];
let Defs = [PC, R29, R30, R31];
let BaseOpcode = "L4_return";
@@ -12966,7 +13020,7 @@ def L4_return_tnew_pnt : HInst<
(outs),
(ins PredRegs:$Pv4),
"if ($Pv4.new) dealloc_return:nt",
-LD_tc_3or4stall_SLOT0, TypeLD>, Enc_12711252, PredNewRel {
+tc_3993c58b, TypeLD>, Enc_b7fad3, PredNewRel {
let Inst{4-0} = 0b11110;
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b0010;
@@ -12977,9 +13031,9 @@ let isTerminator = 1;
let isIndirectBranch = 1;
let accessSize = DoubleWordAccess;
let cofMax1 = 1;
-let isReturn = 1;
let isPredicatedNew = 1;
let mayLoad = 1;
+let isReturn = 1;
let Uses = [R30];
let Defs = [PC, R29, R30, R31];
let BaseOpcode = "L4_return";
@@ -12989,7 +13043,7 @@ def L4_return_tnew_pt : HInst<
(outs),
(ins PredRegs:$Pv4),
"if ($Pv4.new) dealloc_return:t",
-LD_tc_3or4stall_SLOT0, TypeLD>, Enc_12711252, PredNewRel {
+tc_3993c58b, TypeLD>, Enc_b7fad3, PredNewRel {
let Inst{4-0} = 0b11110;
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b0110;
@@ -13000,9 +13054,9 @@ let isTerminator = 1;
let isIndirectBranch = 1;
let accessSize = DoubleWordAccess;
let cofMax1 = 1;
-let isReturn = 1;
let isPredicatedNew = 1;
let mayLoad = 1;
+let isReturn = 1;
let Uses = [R30];
let Defs = [PC, R29, R30, R31];
let BaseOpcode = "L4_return";
@@ -13012,14 +13066,14 @@ def L4_sub_memopb_io : HInst<
(outs),
(ins IntRegs:$Rs32, u32_0Imm:$Ii, IntRegs:$Rt32),
"memb($Rs32+#$Ii) -= $Rt32",
-V4LDST_tc_st_SLOT0, TypeV4LDST>, Enc_11849200 {
+tc_a9c993d9, TypeV4LDST>, Enc_d44e31 {
let Inst{6-5} = 0b01;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00111110000;
let addrMode = BaseImmOffset;
let accessSize = ByteAccess;
-let mayStore = 1;
let mayLoad = 1;
+let mayStore = 1;
let isExtendable = 1;
let opExtendable = 1;
let isExtentSigned = 0;
@@ -13030,7 +13084,7 @@ def L4_sub_memopb_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"memb($Rs32) -= $Rt32",
-PSEUDO, TypeMAPPING> {
+tc_a9c993d9, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -13038,14 +13092,14 @@ def L4_sub_memoph_io : HInst<
(outs),
(ins IntRegs:$Rs32, u31_1Imm:$Ii, IntRegs:$Rt32),
"memh($Rs32+#$Ii) -= $Rt32",
-V4LDST_tc_st_SLOT0, TypeV4LDST>, Enc_8849208 {
+tc_a9c993d9, TypeV4LDST>, Enc_163a3c {
let Inst{6-5} = 0b01;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00111110001;
let addrMode = BaseImmOffset;
let accessSize = HalfWordAccess;
-let mayStore = 1;
let mayLoad = 1;
+let mayStore = 1;
let isExtendable = 1;
let opExtendable = 1;
let isExtentSigned = 0;
@@ -13056,7 +13110,7 @@ def L4_sub_memoph_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"memh($Rs32) -= $Rt32",
-PSEUDO, TypeMAPPING> {
+tc_a9c993d9, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -13064,14 +13118,14 @@ def L4_sub_memopw_io : HInst<
(outs),
(ins IntRegs:$Rs32, u30_2Imm:$Ii, IntRegs:$Rt32),
"memw($Rs32+#$Ii) -= $Rt32",
-V4LDST_tc_st_SLOT0, TypeV4LDST>, Enc_9849208 {
+tc_a9c993d9, TypeV4LDST>, Enc_226535 {
let Inst{6-5} = 0b01;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00111110010;
let addrMode = BaseImmOffset;
let accessSize = WordAccess;
-let mayStore = 1;
let mayLoad = 1;
+let mayStore = 1;
let isExtendable = 1;
let opExtendable = 1;
let isExtentSigned = 0;
@@ -13082,7 +13136,7 @@ def L4_sub_memopw_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"memw($Rs32) -= $Rt32",
-PSEUDO, TypeMAPPING> {
+tc_a9c993d9, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -13090,7 +13144,7 @@ def M2_acci : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += add($Rs32,$Rt32)",
-M_tc_2_acc_SLOT23, TypeM>, Enc_9223889, ImmRegRel {
+tc_c0cd91a8, TypeM>, Enc_2ae154, ImmRegRel {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101111000;
@@ -13105,7 +13159,7 @@ def M2_accii : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, s32_0Imm:$Ii),
"$Rx32 += add($Rs32,#$Ii)",
-M_tc_2_acc_SLOT23, TypeM>, Enc_11522288, ImmRegRel {
+tc_c0cd91a8, TypeM>, Enc_c90aca, ImmRegRel {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100010000;
let hasNewValue = 1;
@@ -13124,7 +13178,7 @@ def M2_cmaci_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += cmpyi($Rs32,$Rt32)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100111000;
@@ -13135,7 +13189,7 @@ def M2_cmacr_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += cmpyr($Rs32,$Rt32)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100111000;
@@ -13146,7 +13200,7 @@ def M2_cmacs_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += cmpy($Rs32,$Rt32):sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100111000;
@@ -13158,7 +13212,7 @@ def M2_cmacs_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += cmpy($Rs32,$Rt32):<<1:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100111100;
@@ -13170,7 +13224,7 @@ def M2_cmacsc_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += cmpy($Rs32,$Rt32*):sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100111010;
@@ -13182,7 +13236,7 @@ def M2_cmacsc_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += cmpy($Rs32,$Rt32*):<<1:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100111110;
@@ -13194,7 +13248,7 @@ def M2_cmpyi_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = cmpyi($Rs32,$Rt32)",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100101000;
@@ -13204,7 +13258,7 @@ def M2_cmpyr_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = cmpyr($Rs32,$Rt32)",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100101000;
@@ -13214,7 +13268,7 @@ def M2_cmpyrs_s0 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = cmpy($Rs32,$Rt32):rnd:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101101001;
@@ -13227,7 +13281,7 @@ def M2_cmpyrs_s1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = cmpy($Rs32,$Rt32):<<1:rnd:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101101101;
@@ -13240,7 +13294,7 @@ def M2_cmpyrsc_s0 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = cmpy($Rs32,$Rt32*):rnd:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101101011;
@@ -13253,7 +13307,7 @@ def M2_cmpyrsc_s1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = cmpy($Rs32,$Rt32*):<<1:rnd:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101101111;
@@ -13266,7 +13320,7 @@ def M2_cmpys_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = cmpy($Rs32,$Rt32):sat",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100101000;
@@ -13277,7 +13331,7 @@ def M2_cmpys_s1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = cmpy($Rs32,$Rt32):<<1:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100101100;
@@ -13288,7 +13342,7 @@ def M2_cmpysc_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = cmpy($Rs32,$Rt32*):sat",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100101010;
@@ -13299,7 +13353,7 @@ def M2_cmpysc_s1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = cmpy($Rs32,$Rt32*):<<1:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100101110;
@@ -13310,7 +13364,7 @@ def M2_cnacs_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 -= cmpy($Rs32,$Rt32):sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100111000;
@@ -13322,7 +13376,7 @@ def M2_cnacs_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 -= cmpy($Rs32,$Rt32):<<1:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100111100;
@@ -13334,7 +13388,7 @@ def M2_cnacsc_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 -= cmpy($Rs32,$Rt32*):sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100111010;
@@ -13346,7 +13400,7 @@ def M2_cnacsc_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 -= cmpy($Rs32,$Rt32*):<<1:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100111110;
@@ -13358,7 +13412,7 @@ def M2_dpmpyss_acc_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += mpy($Rs32,$Rt32)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100111000;
@@ -13369,7 +13423,7 @@ def M2_dpmpyss_nac_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 -= mpy($Rs32,$Rt32)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100111001;
@@ -13380,7 +13434,7 @@ def M2_dpmpyss_rnd_s0 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32,$Rt32):rnd",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101101001;
@@ -13392,7 +13446,7 @@ def M2_dpmpyss_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = mpy($Rs32,$Rt32)",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100101000;
@@ -13402,7 +13456,7 @@ def M2_dpmpyuu_acc_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += mpyu($Rs32,$Rt32)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100111010;
@@ -13413,7 +13467,7 @@ def M2_dpmpyuu_nac_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 -= mpyu($Rs32,$Rt32)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100111011;
@@ -13424,7 +13478,7 @@ def M2_dpmpyuu_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = mpyu($Rs32,$Rt32)",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100101010;
@@ -13434,7 +13488,7 @@ def M2_hmmpyh_rs1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32,$Rt32.h):<<1:rnd:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101101101;
@@ -13447,7 +13501,7 @@ def M2_hmmpyh_s1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32,$Rt32.h):<<1:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101101101;
@@ -13460,7 +13514,7 @@ def M2_hmmpyl_rs1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32,$Rt32.l):<<1:rnd:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101101111;
@@ -13473,7 +13527,7 @@ def M2_hmmpyl_s1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32,$Rt32.l):<<1:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101101101;
@@ -13486,7 +13540,7 @@ def M2_maci : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += mpyi($Rs32,$Rt32)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889, ImmRegRel {
+tc_8cb685d9, TypeM>, Enc_2ae154, ImmRegRel {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101111000;
@@ -13501,7 +13555,7 @@ def M2_macsin : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u32_0Imm:$Ii),
"$Rx32 -= mpyi($Rs32,#$Ii)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_11522288 {
+tc_a12a5971, TypeM>, Enc_c90aca {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100001100;
let hasNewValue = 1;
@@ -13519,7 +13573,7 @@ def M2_macsip : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u32_0Imm:$Ii),
"$Rx32 += mpyi($Rs32,#$Ii)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_11522288, ImmRegRel {
+tc_a12a5971, TypeM>, Enc_c90aca, ImmRegRel {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100001000;
let hasNewValue = 1;
@@ -13538,7 +13592,7 @@ def M2_mmachs_rs0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vmpywoh($Rss32,$Rtt32):rnd:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010001;
@@ -13550,7 +13604,7 @@ def M2_mmachs_rs1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vmpywoh($Rss32,$Rtt32):<<1:rnd:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010101;
@@ -13562,7 +13616,7 @@ def M2_mmachs_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vmpywoh($Rss32,$Rtt32):sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010000;
@@ -13574,7 +13628,7 @@ def M2_mmachs_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vmpywoh($Rss32,$Rtt32):<<1:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010100;
@@ -13586,7 +13640,7 @@ def M2_mmacls_rs0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vmpyweh($Rss32,$Rtt32):rnd:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010001;
@@ -13598,7 +13652,7 @@ def M2_mmacls_rs1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vmpyweh($Rss32,$Rtt32):<<1:rnd:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010101;
@@ -13610,7 +13664,7 @@ def M2_mmacls_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vmpyweh($Rss32,$Rtt32):sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010000;
@@ -13622,7 +13676,7 @@ def M2_mmacls_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vmpyweh($Rss32,$Rtt32):<<1:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010100;
@@ -13634,7 +13688,7 @@ def M2_mmacuhs_rs0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vmpywouh($Rss32,$Rtt32):rnd:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010011;
@@ -13646,7 +13700,7 @@ def M2_mmacuhs_rs1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vmpywouh($Rss32,$Rtt32):<<1:rnd:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010111;
@@ -13658,7 +13712,7 @@ def M2_mmacuhs_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vmpywouh($Rss32,$Rtt32):sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010010;
@@ -13670,7 +13724,7 @@ def M2_mmacuhs_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vmpywouh($Rss32,$Rtt32):<<1:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010110;
@@ -13682,7 +13736,7 @@ def M2_mmaculs_rs0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vmpyweuh($Rss32,$Rtt32):rnd:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010011;
@@ -13694,7 +13748,7 @@ def M2_mmaculs_rs1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vmpyweuh($Rss32,$Rtt32):<<1:rnd:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010111;
@@ -13706,7 +13760,7 @@ def M2_mmaculs_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vmpyweuh($Rss32,$Rtt32):sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010010;
@@ -13718,7 +13772,7 @@ def M2_mmaculs_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vmpyweuh($Rss32,$Rtt32):<<1:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010110;
@@ -13730,7 +13784,7 @@ def M2_mmpyh_rs0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vmpywoh($Rss32,$Rtt32):rnd:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000001;
@@ -13741,7 +13795,7 @@ def M2_mmpyh_rs1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vmpywoh($Rss32,$Rtt32):<<1:rnd:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000101;
@@ -13752,7 +13806,7 @@ def M2_mmpyh_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vmpywoh($Rss32,$Rtt32):sat",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000000;
@@ -13763,7 +13817,7 @@ def M2_mmpyh_s1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vmpywoh($Rss32,$Rtt32):<<1:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000100;
@@ -13774,7 +13828,7 @@ def M2_mmpyl_rs0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vmpyweh($Rss32,$Rtt32):rnd:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000001;
@@ -13785,7 +13839,7 @@ def M2_mmpyl_rs1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vmpyweh($Rss32,$Rtt32):<<1:rnd:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000101;
@@ -13796,7 +13850,7 @@ def M2_mmpyl_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vmpyweh($Rss32,$Rtt32):sat",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000000;
@@ -13807,7 +13861,7 @@ def M2_mmpyl_s1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vmpyweh($Rss32,$Rtt32):<<1:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000100;
@@ -13818,7 +13872,7 @@ def M2_mmpyuh_rs0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vmpywouh($Rss32,$Rtt32):rnd:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000011;
@@ -13829,7 +13883,7 @@ def M2_mmpyuh_rs1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vmpywouh($Rss32,$Rtt32):<<1:rnd:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000111;
@@ -13840,7 +13894,7 @@ def M2_mmpyuh_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vmpywouh($Rss32,$Rtt32):sat",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000010;
@@ -13851,7 +13905,7 @@ def M2_mmpyuh_s1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vmpywouh($Rss32,$Rtt32):<<1:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000110;
@@ -13862,7 +13916,7 @@ def M2_mmpyul_rs0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vmpyweuh($Rss32,$Rtt32):rnd:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000011;
@@ -13873,7 +13927,7 @@ def M2_mmpyul_rs1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vmpyweuh($Rss32,$Rtt32):<<1:rnd:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000111;
@@ -13884,7 +13938,7 @@ def M2_mmpyul_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vmpyweuh($Rss32,$Rtt32):sat",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000010;
@@ -13895,7 +13949,7 @@ def M2_mmpyul_s1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vmpyweuh($Rss32,$Rtt32):<<1:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000110;
@@ -13906,7 +13960,7 @@ def M2_mpy_acc_hh_s0 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += mpy($Rs32.h,$Rt32.h)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110000;
@@ -13919,7 +13973,7 @@ def M2_mpy_acc_hh_s1 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += mpy($Rs32.h,$Rt32.h):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110100;
@@ -13932,7 +13986,7 @@ def M2_mpy_acc_hl_s0 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += mpy($Rs32.h,$Rt32.l)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110000;
@@ -13945,7 +13999,7 @@ def M2_mpy_acc_hl_s1 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += mpy($Rs32.h,$Rt32.l):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110100;
@@ -13958,7 +14012,7 @@ def M2_mpy_acc_lh_s0 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += mpy($Rs32.l,$Rt32.h)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110000;
@@ -13971,7 +14025,7 @@ def M2_mpy_acc_lh_s1 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += mpy($Rs32.l,$Rt32.h):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110100;
@@ -13984,7 +14038,7 @@ def M2_mpy_acc_ll_s0 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += mpy($Rs32.l,$Rt32.l)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110000;
@@ -13997,7 +14051,7 @@ def M2_mpy_acc_ll_s1 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += mpy($Rs32.l,$Rt32.l):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110100;
@@ -14010,7 +14064,7 @@ def M2_mpy_acc_sat_hh_s0 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += mpy($Rs32.h,$Rt32.h):sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110000;
@@ -14024,7 +14078,7 @@ def M2_mpy_acc_sat_hh_s1 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += mpy($Rs32.h,$Rt32.h):<<1:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110100;
@@ -14038,7 +14092,7 @@ def M2_mpy_acc_sat_hl_s0 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += mpy($Rs32.h,$Rt32.l):sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110000;
@@ -14052,7 +14106,7 @@ def M2_mpy_acc_sat_hl_s1 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += mpy($Rs32.h,$Rt32.l):<<1:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110100;
@@ -14066,7 +14120,7 @@ def M2_mpy_acc_sat_lh_s0 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += mpy($Rs32.l,$Rt32.h):sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110000;
@@ -14080,7 +14134,7 @@ def M2_mpy_acc_sat_lh_s1 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += mpy($Rs32.l,$Rt32.h):<<1:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110100;
@@ -14094,7 +14148,7 @@ def M2_mpy_acc_sat_ll_s0 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += mpy($Rs32.l,$Rt32.l):sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110000;
@@ -14108,7 +14162,7 @@ def M2_mpy_acc_sat_ll_s1 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += mpy($Rs32.l,$Rt32.l):<<1:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110100;
@@ -14122,7 +14176,7 @@ def M2_mpy_hh_s0 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.h,$Rt32.h)",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100000;
@@ -14134,7 +14188,7 @@ def M2_mpy_hh_s1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.h,$Rt32.h):<<1",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100100;
@@ -14146,7 +14200,7 @@ def M2_mpy_hl_s0 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.h,$Rt32.l)",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100000;
@@ -14158,7 +14212,7 @@ def M2_mpy_hl_s1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.h,$Rt32.l):<<1",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100100;
@@ -14170,7 +14224,7 @@ def M2_mpy_lh_s0 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.l,$Rt32.h)",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100000;
@@ -14182,7 +14236,7 @@ def M2_mpy_lh_s1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.l,$Rt32.h):<<1",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100100;
@@ -14194,7 +14248,7 @@ def M2_mpy_ll_s0 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.l,$Rt32.l)",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100000;
@@ -14206,7 +14260,7 @@ def M2_mpy_ll_s1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.l,$Rt32.l):<<1",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100100;
@@ -14218,7 +14272,7 @@ def M2_mpy_nac_hh_s0 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= mpy($Rs32.h,$Rt32.h)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110001;
@@ -14231,7 +14285,7 @@ def M2_mpy_nac_hh_s1 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= mpy($Rs32.h,$Rt32.h):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110101;
@@ -14244,7 +14298,7 @@ def M2_mpy_nac_hl_s0 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= mpy($Rs32.h,$Rt32.l)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110001;
@@ -14257,7 +14311,7 @@ def M2_mpy_nac_hl_s1 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= mpy($Rs32.h,$Rt32.l):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110101;
@@ -14270,7 +14324,7 @@ def M2_mpy_nac_lh_s0 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= mpy($Rs32.l,$Rt32.h)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110001;
@@ -14283,7 +14337,7 @@ def M2_mpy_nac_lh_s1 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= mpy($Rs32.l,$Rt32.h):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110101;
@@ -14296,7 +14350,7 @@ def M2_mpy_nac_ll_s0 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= mpy($Rs32.l,$Rt32.l)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110001;
@@ -14309,7 +14363,7 @@ def M2_mpy_nac_ll_s1 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= mpy($Rs32.l,$Rt32.l):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110101;
@@ -14322,7 +14376,7 @@ def M2_mpy_nac_sat_hh_s0 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= mpy($Rs32.h,$Rt32.h):sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110001;
@@ -14336,7 +14390,7 @@ def M2_mpy_nac_sat_hh_s1 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= mpy($Rs32.h,$Rt32.h):<<1:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110101;
@@ -14350,7 +14404,7 @@ def M2_mpy_nac_sat_hl_s0 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= mpy($Rs32.h,$Rt32.l):sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110001;
@@ -14364,7 +14418,7 @@ def M2_mpy_nac_sat_hl_s1 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= mpy($Rs32.h,$Rt32.l):<<1:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110101;
@@ -14378,7 +14432,7 @@ def M2_mpy_nac_sat_lh_s0 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= mpy($Rs32.l,$Rt32.h):sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110001;
@@ -14392,7 +14446,7 @@ def M2_mpy_nac_sat_lh_s1 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= mpy($Rs32.l,$Rt32.h):<<1:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110101;
@@ -14406,7 +14460,7 @@ def M2_mpy_nac_sat_ll_s0 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= mpy($Rs32.l,$Rt32.l):sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110001;
@@ -14420,7 +14474,7 @@ def M2_mpy_nac_sat_ll_s1 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= mpy($Rs32.l,$Rt32.l):<<1:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110101;
@@ -14434,7 +14488,7 @@ def M2_mpy_rnd_hh_s0 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.h,$Rt32.h):rnd",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100001;
@@ -14446,7 +14500,7 @@ def M2_mpy_rnd_hh_s1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.h,$Rt32.h):<<1:rnd",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100101;
@@ -14458,7 +14512,7 @@ def M2_mpy_rnd_hl_s0 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.h,$Rt32.l):rnd",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100001;
@@ -14470,7 +14524,7 @@ def M2_mpy_rnd_hl_s1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.h,$Rt32.l):<<1:rnd",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100101;
@@ -14482,7 +14536,7 @@ def M2_mpy_rnd_lh_s0 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.l,$Rt32.h):rnd",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100001;
@@ -14494,7 +14548,7 @@ def M2_mpy_rnd_lh_s1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.l,$Rt32.h):<<1:rnd",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100101;
@@ -14506,7 +14560,7 @@ def M2_mpy_rnd_ll_s0 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.l,$Rt32.l):rnd",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100001;
@@ -14518,7 +14572,7 @@ def M2_mpy_rnd_ll_s1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.l,$Rt32.l):<<1:rnd",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100101;
@@ -14530,7 +14584,7 @@ def M2_mpy_sat_hh_s0 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.h,$Rt32.h):sat",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100000;
@@ -14543,7 +14597,7 @@ def M2_mpy_sat_hh_s1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.h,$Rt32.h):<<1:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100100;
@@ -14556,7 +14610,7 @@ def M2_mpy_sat_hl_s0 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.h,$Rt32.l):sat",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100000;
@@ -14569,7 +14623,7 @@ def M2_mpy_sat_hl_s1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.h,$Rt32.l):<<1:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100100;
@@ -14582,7 +14636,7 @@ def M2_mpy_sat_lh_s0 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.l,$Rt32.h):sat",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100000;
@@ -14595,7 +14649,7 @@ def M2_mpy_sat_lh_s1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.l,$Rt32.h):<<1:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100100;
@@ -14608,7 +14662,7 @@ def M2_mpy_sat_ll_s0 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.l,$Rt32.l):sat",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100000;
@@ -14621,7 +14675,7 @@ def M2_mpy_sat_ll_s1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.l,$Rt32.l):<<1:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100100;
@@ -14634,7 +14688,7 @@ def M2_mpy_sat_rnd_hh_s0 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.h,$Rt32.h):rnd:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100001;
@@ -14647,7 +14701,7 @@ def M2_mpy_sat_rnd_hh_s1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.h,$Rt32.h):<<1:rnd:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100101;
@@ -14660,7 +14714,7 @@ def M2_mpy_sat_rnd_hl_s0 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.h,$Rt32.l):rnd:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100001;
@@ -14673,7 +14727,7 @@ def M2_mpy_sat_rnd_hl_s1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.h,$Rt32.l):<<1:rnd:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100101;
@@ -14686,7 +14740,7 @@ def M2_mpy_sat_rnd_lh_s0 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.l,$Rt32.h):rnd:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100001;
@@ -14699,7 +14753,7 @@ def M2_mpy_sat_rnd_lh_s1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.l,$Rt32.h):<<1:rnd:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100101;
@@ -14712,7 +14766,7 @@ def M2_mpy_sat_rnd_ll_s0 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.l,$Rt32.l):rnd:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100001;
@@ -14725,7 +14779,7 @@ def M2_mpy_sat_rnd_ll_s1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.l,$Rt32.l):<<1:rnd:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100101;
@@ -14738,7 +14792,7 @@ def M2_mpy_up : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32,$Rt32)",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101101000;
@@ -14750,7 +14804,7 @@ def M2_mpy_up_s1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32,$Rt32):<<1",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101101101;
@@ -14762,7 +14816,7 @@ def M2_mpy_up_s1_sat : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32,$Rt32):<<1:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101101111;
@@ -14775,7 +14829,7 @@ def M2_mpyd_acc_hh_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += mpy($Rs32.h,$Rt32.h)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110000;
@@ -14786,7 +14840,7 @@ def M2_mpyd_acc_hh_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += mpy($Rs32.h,$Rt32.h):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110100;
@@ -14797,7 +14851,7 @@ def M2_mpyd_acc_hl_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += mpy($Rs32.h,$Rt32.l)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110000;
@@ -14808,7 +14862,7 @@ def M2_mpyd_acc_hl_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += mpy($Rs32.h,$Rt32.l):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110100;
@@ -14819,7 +14873,7 @@ def M2_mpyd_acc_lh_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += mpy($Rs32.l,$Rt32.h)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110000;
@@ -14830,7 +14884,7 @@ def M2_mpyd_acc_lh_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += mpy($Rs32.l,$Rt32.h):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110100;
@@ -14841,7 +14895,7 @@ def M2_mpyd_acc_ll_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += mpy($Rs32.l,$Rt32.l)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110000;
@@ -14852,7 +14906,7 @@ def M2_mpyd_acc_ll_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += mpy($Rs32.l,$Rt32.l):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110100;
@@ -14863,7 +14917,7 @@ def M2_mpyd_hh_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = mpy($Rs32.h,$Rt32.h)",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100100000;
@@ -14873,7 +14927,7 @@ def M2_mpyd_hh_s1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = mpy($Rs32.h,$Rt32.h):<<1",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100100100;
@@ -14883,7 +14937,7 @@ def M2_mpyd_hl_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = mpy($Rs32.h,$Rt32.l)",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100100000;
@@ -14893,7 +14947,7 @@ def M2_mpyd_hl_s1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = mpy($Rs32.h,$Rt32.l):<<1",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100100100;
@@ -14903,7 +14957,7 @@ def M2_mpyd_lh_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = mpy($Rs32.l,$Rt32.h)",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100100000;
@@ -14913,7 +14967,7 @@ def M2_mpyd_lh_s1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = mpy($Rs32.l,$Rt32.h):<<1",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100100100;
@@ -14923,7 +14977,7 @@ def M2_mpyd_ll_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = mpy($Rs32.l,$Rt32.l)",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100100000;
@@ -14933,7 +14987,7 @@ def M2_mpyd_ll_s1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = mpy($Rs32.l,$Rt32.l):<<1",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100100100;
@@ -14943,7 +14997,7 @@ def M2_mpyd_nac_hh_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 -= mpy($Rs32.h,$Rt32.h)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110001;
@@ -14954,7 +15008,7 @@ def M2_mpyd_nac_hh_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 -= mpy($Rs32.h,$Rt32.h):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110101;
@@ -14965,7 +15019,7 @@ def M2_mpyd_nac_hl_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 -= mpy($Rs32.h,$Rt32.l)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110001;
@@ -14976,7 +15030,7 @@ def M2_mpyd_nac_hl_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 -= mpy($Rs32.h,$Rt32.l):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110101;
@@ -14987,7 +15041,7 @@ def M2_mpyd_nac_lh_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 -= mpy($Rs32.l,$Rt32.h)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110001;
@@ -14998,7 +15052,7 @@ def M2_mpyd_nac_lh_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 -= mpy($Rs32.l,$Rt32.h):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110101;
@@ -15009,7 +15063,7 @@ def M2_mpyd_nac_ll_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 -= mpy($Rs32.l,$Rt32.l)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110001;
@@ -15020,7 +15074,7 @@ def M2_mpyd_nac_ll_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 -= mpy($Rs32.l,$Rt32.l):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110101;
@@ -15031,7 +15085,7 @@ def M2_mpyd_rnd_hh_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = mpy($Rs32.h,$Rt32.h):rnd",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100100001;
@@ -15041,7 +15095,7 @@ def M2_mpyd_rnd_hh_s1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = mpy($Rs32.h,$Rt32.h):<<1:rnd",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100100101;
@@ -15051,7 +15105,7 @@ def M2_mpyd_rnd_hl_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = mpy($Rs32.h,$Rt32.l):rnd",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100100001;
@@ -15061,7 +15115,7 @@ def M2_mpyd_rnd_hl_s1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = mpy($Rs32.h,$Rt32.l):<<1:rnd",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100100101;
@@ -15071,7 +15125,7 @@ def M2_mpyd_rnd_lh_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = mpy($Rs32.l,$Rt32.h):rnd",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100100001;
@@ -15081,7 +15135,7 @@ def M2_mpyd_rnd_lh_s1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = mpy($Rs32.l,$Rt32.h):<<1:rnd",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100100101;
@@ -15091,7 +15145,7 @@ def M2_mpyd_rnd_ll_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = mpy($Rs32.l,$Rt32.l):rnd",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100100001;
@@ -15101,7 +15155,7 @@ def M2_mpyd_rnd_ll_s1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = mpy($Rs32.l,$Rt32.l):<<1:rnd",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100100101;
@@ -15111,7 +15165,7 @@ def M2_mpyi : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpyi($Rs32,$Rt32)",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773, ImmRegRel {
+tc_8c8041e6, TypeM>, Enc_5ab2be, ImmRegRel {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101101000;
@@ -15125,7 +15179,7 @@ def M2_mpysin : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, u8_0Imm:$Ii),
"$Rd32 = -mpyi($Rs32,#$Ii)",
-M_tc_3x_SLOT23, TypeM>, Enc_16355964 {
+tc_ae2c2dc2, TypeM>, Enc_b8c967 {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100000100;
let hasNewValue = 1;
@@ -15136,7 +15190,7 @@ def M2_mpysip : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, u32_0Imm:$Ii),
"$Rd32 = +mpyi($Rs32,#$Ii)",
-M_tc_3x_SLOT23, TypeM>, Enc_16355964 {
+tc_ae2c2dc2, TypeM>, Enc_b8c967 {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100000000;
let hasNewValue = 1;
@@ -15152,7 +15206,7 @@ def M2_mpysmi : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, m32_0Imm:$Ii),
"$Rd32 = mpyi($Rs32,#$Ii)",
-M_tc_3x_SLOT23, TypeM>, ImmRegRel {
+tc_ae2c2dc2, TypeM>, ImmRegRel {
let hasNewValue = 1;
let opNewValue = 0;
let CextOpcode = "M2_mpyi";
@@ -15168,7 +15222,7 @@ def M2_mpysu_up : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpysu($Rs32,$Rt32)",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101101011;
@@ -15180,7 +15234,7 @@ def M2_mpyu_acc_hh_s0 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += mpyu($Rs32.h,$Rt32.h)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110010;
@@ -15193,7 +15247,7 @@ def M2_mpyu_acc_hh_s1 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += mpyu($Rs32.h,$Rt32.h):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110110;
@@ -15206,7 +15260,7 @@ def M2_mpyu_acc_hl_s0 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += mpyu($Rs32.h,$Rt32.l)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110010;
@@ -15219,7 +15273,7 @@ def M2_mpyu_acc_hl_s1 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += mpyu($Rs32.h,$Rt32.l):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110110;
@@ -15232,7 +15286,7 @@ def M2_mpyu_acc_lh_s0 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += mpyu($Rs32.l,$Rt32.h)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110010;
@@ -15245,7 +15299,7 @@ def M2_mpyu_acc_lh_s1 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += mpyu($Rs32.l,$Rt32.h):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110110;
@@ -15258,7 +15312,7 @@ def M2_mpyu_acc_ll_s0 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += mpyu($Rs32.l,$Rt32.l)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110010;
@@ -15271,7 +15325,7 @@ def M2_mpyu_acc_ll_s1 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += mpyu($Rs32.l,$Rt32.l):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110110;
@@ -15284,7 +15338,7 @@ def M2_mpyu_hh_s0 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpyu($Rs32.h,$Rt32.h)",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100010;
@@ -15296,7 +15350,7 @@ def M2_mpyu_hh_s1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpyu($Rs32.h,$Rt32.h):<<1",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100110;
@@ -15308,7 +15362,7 @@ def M2_mpyu_hl_s0 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpyu($Rs32.h,$Rt32.l)",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100010;
@@ -15320,7 +15374,7 @@ def M2_mpyu_hl_s1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpyu($Rs32.h,$Rt32.l):<<1",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100110;
@@ -15332,7 +15386,7 @@ def M2_mpyu_lh_s0 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpyu($Rs32.l,$Rt32.h)",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100010;
@@ -15344,7 +15398,7 @@ def M2_mpyu_lh_s1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpyu($Rs32.l,$Rt32.h):<<1",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100110;
@@ -15356,7 +15410,7 @@ def M2_mpyu_ll_s0 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpyu($Rs32.l,$Rt32.l)",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100010;
@@ -15368,7 +15422,7 @@ def M2_mpyu_ll_s1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpyu($Rs32.l,$Rt32.l):<<1",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100110;
@@ -15380,7 +15434,7 @@ def M2_mpyu_nac_hh_s0 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= mpyu($Rs32.h,$Rt32.h)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110011;
@@ -15393,7 +15447,7 @@ def M2_mpyu_nac_hh_s1 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= mpyu($Rs32.h,$Rt32.h):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110111;
@@ -15406,7 +15460,7 @@ def M2_mpyu_nac_hl_s0 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= mpyu($Rs32.h,$Rt32.l)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110011;
@@ -15419,7 +15473,7 @@ def M2_mpyu_nac_hl_s1 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= mpyu($Rs32.h,$Rt32.l):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110111;
@@ -15432,7 +15486,7 @@ def M2_mpyu_nac_lh_s0 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= mpyu($Rs32.l,$Rt32.h)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110011;
@@ -15445,7 +15499,7 @@ def M2_mpyu_nac_lh_s1 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= mpyu($Rs32.l,$Rt32.h):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110111;
@@ -15458,7 +15512,7 @@ def M2_mpyu_nac_ll_s0 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= mpyu($Rs32.l,$Rt32.l)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110011;
@@ -15471,7 +15525,7 @@ def M2_mpyu_nac_ll_s1 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= mpyu($Rs32.l,$Rt32.l):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110111;
@@ -15484,7 +15538,7 @@ def M2_mpyu_up : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpyu($Rs32,$Rt32)",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101101010;
@@ -15496,7 +15550,7 @@ def M2_mpyud_acc_hh_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += mpyu($Rs32.h,$Rt32.h)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110010;
@@ -15507,7 +15561,7 @@ def M2_mpyud_acc_hh_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += mpyu($Rs32.h,$Rt32.h):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110110;
@@ -15518,7 +15572,7 @@ def M2_mpyud_acc_hl_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += mpyu($Rs32.h,$Rt32.l)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110010;
@@ -15529,7 +15583,7 @@ def M2_mpyud_acc_hl_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += mpyu($Rs32.h,$Rt32.l):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110110;
@@ -15540,7 +15594,7 @@ def M2_mpyud_acc_lh_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += mpyu($Rs32.l,$Rt32.h)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110010;
@@ -15551,7 +15605,7 @@ def M2_mpyud_acc_lh_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += mpyu($Rs32.l,$Rt32.h):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110110;
@@ -15562,7 +15616,7 @@ def M2_mpyud_acc_ll_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += mpyu($Rs32.l,$Rt32.l)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110010;
@@ -15573,7 +15627,7 @@ def M2_mpyud_acc_ll_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += mpyu($Rs32.l,$Rt32.l):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110110;
@@ -15584,7 +15638,7 @@ def M2_mpyud_hh_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = mpyu($Rs32.h,$Rt32.h)",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100100010;
@@ -15594,7 +15648,7 @@ def M2_mpyud_hh_s1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = mpyu($Rs32.h,$Rt32.h):<<1",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100100110;
@@ -15604,7 +15658,7 @@ def M2_mpyud_hl_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = mpyu($Rs32.h,$Rt32.l)",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100100010;
@@ -15614,7 +15668,7 @@ def M2_mpyud_hl_s1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = mpyu($Rs32.h,$Rt32.l):<<1",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100100110;
@@ -15624,7 +15678,7 @@ def M2_mpyud_lh_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = mpyu($Rs32.l,$Rt32.h)",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100100010;
@@ -15634,7 +15688,7 @@ def M2_mpyud_lh_s1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = mpyu($Rs32.l,$Rt32.h):<<1",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100100110;
@@ -15644,7 +15698,7 @@ def M2_mpyud_ll_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = mpyu($Rs32.l,$Rt32.l)",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100100010;
@@ -15654,7 +15708,7 @@ def M2_mpyud_ll_s1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = mpyu($Rs32.l,$Rt32.l):<<1",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100100110;
@@ -15664,7 +15718,7 @@ def M2_mpyud_nac_hh_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 -= mpyu($Rs32.h,$Rt32.h)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110011;
@@ -15675,7 +15729,7 @@ def M2_mpyud_nac_hh_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 -= mpyu($Rs32.h,$Rt32.h):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110111;
@@ -15686,7 +15740,7 @@ def M2_mpyud_nac_hl_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 -= mpyu($Rs32.h,$Rt32.l)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110011;
@@ -15697,7 +15751,7 @@ def M2_mpyud_nac_hl_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 -= mpyu($Rs32.h,$Rt32.l):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110111;
@@ -15708,7 +15762,7 @@ def M2_mpyud_nac_lh_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 -= mpyu($Rs32.l,$Rt32.h)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110011;
@@ -15719,7 +15773,7 @@ def M2_mpyud_nac_lh_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 -= mpyu($Rs32.l,$Rt32.h):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110111;
@@ -15730,7 +15784,7 @@ def M2_mpyud_nac_ll_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 -= mpyu($Rs32.l,$Rt32.l)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110011;
@@ -15741,7 +15795,7 @@ def M2_mpyud_nac_ll_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 -= mpyu($Rs32.l,$Rt32.l):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110111;
@@ -15752,7 +15806,7 @@ def M2_mpyui : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpyui($Rs32,$Rt32)",
-M_tc_3x_SLOT23, TypeM> {
+tc_8c8041e6, TypeM> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -15762,7 +15816,7 @@ def M2_nacci : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= add($Rs32,$Rt32)",
-M_tc_2_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_c0cd91a8, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101111100;
@@ -15776,7 +15830,7 @@ def M2_naccii : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, s32_0Imm:$Ii),
"$Rx32 -= add($Rs32,#$Ii)",
-M_tc_2_acc_SLOT23, TypeM>, Enc_11522288 {
+tc_c0cd91a8, TypeM>, Enc_c90aca {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100010100;
let hasNewValue = 1;
@@ -15794,7 +15848,7 @@ def M2_subacc : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rt32, IntRegs:$Rs32),
"$Rx32 += sub($Rt32,$Rs32)",
-M_tc_2_acc_SLOT23, TypeM>, Enc_7692963 {
+tc_c0cd91a8, TypeM>, Enc_a568d4 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101111000;
@@ -15808,7 +15862,7 @@ def M2_vabsdiffh : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vabsdiffh($Rtt32,$Rss32)",
-M_tc_2_SLOT23, TypeM>, Enc_11687333 {
+tc_63cd9d2d, TypeM>, Enc_ea23e4 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000011;
@@ -15818,7 +15872,7 @@ def M2_vabsdiffw : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vabsdiffw($Rtt32,$Rss32)",
-M_tc_2_SLOT23, TypeM>, Enc_11687333 {
+tc_63cd9d2d, TypeM>, Enc_ea23e4 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000001;
@@ -15828,7 +15882,7 @@ def M2_vcmac_s0_sat_i : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vcmpyi($Rss32,$Rtt32):sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010010;
@@ -15840,7 +15894,7 @@ def M2_vcmac_s0_sat_r : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vcmpyr($Rss32,$Rtt32):sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010001;
@@ -15852,7 +15906,7 @@ def M2_vcmpy_s0_sat_i : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vcmpyi($Rss32,$Rtt32):sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000010;
@@ -15863,7 +15917,7 @@ def M2_vcmpy_s0_sat_r : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vcmpyr($Rss32,$Rtt32):sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000001;
@@ -15874,7 +15928,7 @@ def M2_vcmpy_s1_sat_i : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vcmpyi($Rss32,$Rtt32):<<1:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000110;
@@ -15885,7 +15939,7 @@ def M2_vcmpy_s1_sat_r : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vcmpyr($Rss32,$Rtt32):<<1:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000101;
@@ -15896,7 +15950,7 @@ def M2_vdmacs_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vdmpy($Rss32,$Rtt32):sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010000;
@@ -15908,7 +15962,7 @@ def M2_vdmacs_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vdmpy($Rss32,$Rtt32):<<1:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010100;
@@ -15920,7 +15974,7 @@ def M2_vdmpyrs_s0 : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rd32 = vdmpy($Rss32,$Rtt32):rnd:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_9277990 {
+tc_8c8041e6, TypeM>, Enc_d2216a {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101001000;
@@ -15933,7 +15987,7 @@ def M2_vdmpyrs_s1 : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rd32 = vdmpy($Rss32,$Rtt32):<<1:rnd:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_9277990 {
+tc_8c8041e6, TypeM>, Enc_d2216a {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101001100;
@@ -15946,7 +16000,7 @@ def M2_vdmpys_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vdmpy($Rss32,$Rtt32):sat",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000000;
@@ -15957,7 +16011,7 @@ def M2_vdmpys_s1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vdmpy($Rss32,$Rtt32):<<1:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000100;
@@ -15968,7 +16022,7 @@ def M2_vmac2 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += vmpyh($Rs32,$Rt32)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100111001;
@@ -15979,7 +16033,7 @@ def M2_vmac2es : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vmpyeh($Rss32,$Rtt32)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010001;
@@ -15990,7 +16044,7 @@ def M2_vmac2es_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vmpyeh($Rss32,$Rtt32):sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010000;
@@ -16002,7 +16056,7 @@ def M2_vmac2es_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vmpyeh($Rss32,$Rtt32):<<1:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010100;
@@ -16014,7 +16068,7 @@ def M2_vmac2s_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += vmpyh($Rs32,$Rt32):sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100111000;
@@ -16026,7 +16080,7 @@ def M2_vmac2s_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += vmpyh($Rs32,$Rt32):<<1:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100111100;
@@ -16038,7 +16092,7 @@ def M2_vmac2su_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += vmpyhsu($Rs32,$Rt32):sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100111011;
@@ -16050,7 +16104,7 @@ def M2_vmac2su_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += vmpyhsu($Rs32,$Rt32):<<1:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100111111;
@@ -16062,7 +16116,7 @@ def M2_vmpy2es_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vmpyeh($Rss32,$Rtt32):sat",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000000;
@@ -16073,7 +16127,7 @@ def M2_vmpy2es_s1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vmpyeh($Rss32,$Rtt32):<<1:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000100;
@@ -16084,7 +16138,7 @@ def M2_vmpy2s_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = vmpyh($Rs32,$Rt32):sat",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100101000;
@@ -16095,7 +16149,7 @@ def M2_vmpy2s_s0pack : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = vmpyh($Rs32,$Rt32):rnd:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101101001;
@@ -16108,7 +16162,7 @@ def M2_vmpy2s_s1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = vmpyh($Rs32,$Rt32):<<1:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100101100;
@@ -16119,7 +16173,7 @@ def M2_vmpy2s_s1pack : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = vmpyh($Rs32,$Rt32):<<1:rnd:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101101101;
@@ -16132,7 +16186,7 @@ def M2_vmpy2su_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = vmpyhsu($Rs32,$Rt32):sat",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100101000;
@@ -16143,7 +16197,7 @@ def M2_vmpy2su_s1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = vmpyhsu($Rs32,$Rt32):<<1:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100101100;
@@ -16154,7 +16208,7 @@ def M2_vraddh : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rd32 = vraddh($Rss32,$Rtt32)",
-M_tc_3x_SLOT23, TypeM>, Enc_9277990 {
+tc_8c8041e6, TypeM>, Enc_d2216a {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101001001;
@@ -16166,7 +16220,7 @@ def M2_vradduh : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rd32 = vradduh($Rss32,$Rtt32)",
-M_tc_3x_SLOT23, TypeM>, Enc_9277990 {
+tc_8c8041e6, TypeM>, Enc_d2216a {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101001000;
@@ -16178,7 +16232,7 @@ def M2_vrcmaci_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vrcmpyi($Rss32,$Rtt32)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010000;
@@ -16189,7 +16243,7 @@ def M2_vrcmaci_s0c : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vrcmpyi($Rss32,$Rtt32*)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010010;
@@ -16200,7 +16254,7 @@ def M2_vrcmacr_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vrcmpyr($Rss32,$Rtt32)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010000;
@@ -16211,7 +16265,7 @@ def M2_vrcmacr_s0c : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vrcmpyr($Rss32,$Rtt32*)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010011;
@@ -16222,7 +16276,7 @@ def M2_vrcmpyi_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vrcmpyi($Rss32,$Rtt32)",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000000;
@@ -16232,7 +16286,7 @@ def M2_vrcmpyi_s0c : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vrcmpyi($Rss32,$Rtt32*)",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000010;
@@ -16242,7 +16296,7 @@ def M2_vrcmpyr_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vrcmpyr($Rss32,$Rtt32)",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000000;
@@ -16252,7 +16306,7 @@ def M2_vrcmpyr_s0c : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vrcmpyr($Rss32,$Rtt32*)",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000011;
@@ -16262,7 +16316,7 @@ def M2_vrcmpys_acc_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rxx32 += vrcmpys($Rss32,$Rt32):<<1:sat",
-M_tc_3x_SLOT23, TypeM> {
+tc_8cb685d9, TypeM> {
let isPseudo = 1;
let Constraints = "$Rxx32 = $Rxx32in";
}
@@ -16270,7 +16324,7 @@ def M2_vrcmpys_acc_s1_h : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vrcmpys($Rss32,$Rtt32):<<1:sat:raw:hi",
-M_tc_3x_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010101;
@@ -16282,7 +16336,7 @@ def M2_vrcmpys_acc_s1_l : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vrcmpys($Rss32,$Rtt32):<<1:sat:raw:lo",
-M_tc_3x_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010111;
@@ -16294,14 +16348,14 @@ def M2_vrcmpys_s1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rdd32 = vrcmpys($Rss32,$Rt32):<<1:sat",
-M_tc_3x_SLOT23, TypeM> {
+tc_8c8041e6, TypeM> {
let isPseudo = 1;
}
def M2_vrcmpys_s1_h : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vrcmpys($Rss32,$Rtt32):<<1:sat:raw:hi",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000101;
@@ -16312,7 +16366,7 @@ def M2_vrcmpys_s1_l : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vrcmpys($Rss32,$Rtt32):<<1:sat:raw:lo",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000111;
@@ -16323,7 +16377,7 @@ def M2_vrcmpys_s1rp : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rd32 = vrcmpys($Rss32,$Rt32):<<1:rnd:sat",
-M_tc_3x_SLOT23, TypeM> {
+tc_8c8041e6, TypeM> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -16332,7 +16386,7 @@ def M2_vrcmpys_s1rp_h : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rd32 = vrcmpys($Rss32,$Rtt32):<<1:rnd:sat:raw:hi",
-M_tc_3x_SLOT23, TypeM>, Enc_9277990 {
+tc_8c8041e6, TypeM>, Enc_d2216a {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101001101;
@@ -16345,7 +16399,7 @@ def M2_vrcmpys_s1rp_l : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rd32 = vrcmpys($Rss32,$Rtt32):<<1:rnd:sat:raw:lo",
-M_tc_3x_SLOT23, TypeM>, Enc_9277990 {
+tc_8c8041e6, TypeM>, Enc_d2216a {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101001101;
@@ -16358,7 +16412,7 @@ def M2_vrmac_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vrmpyh($Rss32,$Rtt32)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010000;
@@ -16369,7 +16423,7 @@ def M2_vrmpy_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vrmpyh($Rss32,$Rtt32)",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000000;
@@ -16379,7 +16433,7 @@ def M2_xor_xacc : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 ^= xor($Rs32,$Rt32)",
-M_tc_2_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_3c10f809, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101111100;
@@ -16393,7 +16447,7 @@ def M4_and_and : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 &= and($Rs32,$Rt32)",
-M_tc_2_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_3c10f809, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101111010;
@@ -16407,7 +16461,7 @@ def M4_and_andn : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 &= and($Rs32,~$Rt32)",
-M_tc_2_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_3c10f809, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101111001;
@@ -16421,7 +16475,7 @@ def M4_and_or : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 &= or($Rs32,$Rt32)",
-M_tc_2_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_3c10f809, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101111010;
@@ -16435,7 +16489,7 @@ def M4_and_xor : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 &= xor($Rs32,$Rt32)",
-M_tc_2_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_3c10f809, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101111010;
@@ -16449,7 +16503,7 @@ def M4_cmpyi_wh : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rd32 = cmpyiwh($Rss32,$Rt32):<<1:rnd:sat",
-S_3op_tc_3x_SLOT23, TypeS_3op>, Enc_14287645 {
+tc_8c8041e6, TypeS_3op>, Enc_3d5b28 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000101000;
@@ -16462,7 +16516,7 @@ def M4_cmpyi_whc : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rd32 = cmpyiwh($Rss32,$Rt32*):<<1:rnd:sat",
-S_3op_tc_3x_SLOT23, TypeS_3op>, Enc_14287645, Requires<[HasV5T]> {
+tc_8c8041e6, TypeS_3op>, Enc_3d5b28, Requires<[HasV5T]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000101000;
@@ -16475,7 +16529,7 @@ def M4_cmpyr_wh : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rd32 = cmpyrwh($Rss32,$Rt32):<<1:rnd:sat",
-S_3op_tc_3x_SLOT23, TypeS_3op>, Enc_14287645 {
+tc_8c8041e6, TypeS_3op>, Enc_3d5b28 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000101000;
@@ -16488,7 +16542,7 @@ def M4_cmpyr_whc : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rd32 = cmpyrwh($Rss32,$Rt32*):<<1:rnd:sat",
-S_3op_tc_3x_SLOT23, TypeS_3op>, Enc_14287645, Requires<[HasV5T]> {
+tc_8c8041e6, TypeS_3op>, Enc_3d5b28, Requires<[HasV5T]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000101000;
@@ -16501,7 +16555,7 @@ def M4_mac_up_s1_sat : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += mpy($Rs32,$Rt32):<<1:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101111011;
@@ -16516,7 +16570,7 @@ def M4_mpyri_addi : HInst<
(outs IntRegs:$Rd32),
(ins u32_0Imm:$Ii, IntRegs:$Rs32, u6_0Imm:$II),
"$Rd32 = add(#$Ii,mpyi($Rs32,#$II))",
-ALU64_tc_3x_SLOT23, TypeALU64>, Enc_971574, ImmRegRel {
+tc_a12a5971, TypeALU64>, Enc_322e1b, ImmRegRel {
let Inst{31-24} = 0b11011000;
let hasNewValue = 1;
let opNewValue = 0;
@@ -16532,7 +16586,7 @@ def M4_mpyri_addr : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Ru32, IntRegs:$Rs32, u32_0Imm:$Ii),
"$Rd32 = add($Ru32,mpyi($Rs32,#$Ii))",
-ALU64_tc_3x_SLOT23, TypeALU64>, Enc_236434, ImmRegRel {
+tc_a12a5971, TypeALU64>, Enc_420cf3, ImmRegRel {
let Inst{31-23} = 0b110111111;
let hasNewValue = 1;
let opNewValue = 0;
@@ -16549,7 +16603,7 @@ def M4_mpyri_addr_u2 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Ru32, u6_2Imm:$Ii, IntRegs:$Rs32),
"$Rd32 = add($Ru32,mpyi(#$Ii,$Rs32))",
-ALU64_tc_3x_SLOT23, TypeALU64>, Enc_9959498 {
+tc_69bb508b, TypeALU64>, Enc_277737 {
let Inst{31-23} = 0b110111110;
let hasNewValue = 1;
let opNewValue = 0;
@@ -16559,7 +16613,7 @@ def M4_mpyrr_addi : HInst<
(outs IntRegs:$Rd32),
(ins u32_0Imm:$Ii, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = add(#$Ii,mpyi($Rs32,$Rt32))",
-ALU64_tc_3x_SLOT23, TypeALU64>, Enc_2216485, ImmRegRel {
+tc_8cb685d9, TypeALU64>, Enc_a7b8e8, ImmRegRel {
let Inst{31-23} = 0b110101110;
let hasNewValue = 1;
let opNewValue = 0;
@@ -16576,7 +16630,7 @@ def M4_mpyrr_addr : HInst<
(outs IntRegs:$Ry32),
(ins IntRegs:$Ru32, IntRegs:$Ry32in, IntRegs:$Rs32),
"$Ry32 = add($Ru32,mpyi($Ry32in,$Rs32))",
-M_tc_3x_SLOT23, TypeM>, Enc_13770697, ImmRegRel {
+tc_8cb685d9, TypeM>, Enc_7f1a05, ImmRegRel {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100011000;
@@ -16591,7 +16645,7 @@ def M4_nac_up_s1_sat : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= mpy($Rs32,$Rt32):<<1:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101111011;
@@ -16606,7 +16660,7 @@ def M4_or_and : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 |= and($Rs32,$Rt32)",
-M_tc_2_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_3c10f809, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101111010;
@@ -16620,7 +16674,7 @@ def M4_or_andn : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 |= and($Rs32,~$Rt32)",
-M_tc_2_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_3c10f809, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101111001;
@@ -16634,7 +16688,7 @@ def M4_or_or : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 |= or($Rs32,$Rt32)",
-M_tc_2_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_3c10f809, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101111110;
@@ -16648,7 +16702,7 @@ def M4_or_xor : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 |= xor($Rs32,$Rt32)",
-M_tc_2_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_3c10f809, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101111110;
@@ -16662,7 +16716,7 @@ def M4_pmpyw : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = pmpyw($Rs32,$Rt32)",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100101010;
@@ -16672,7 +16726,7 @@ def M4_pmpyw_acc : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 ^= pmpyw($Rs32,$Rt32)",
-M_tc_3x_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100111001;
@@ -16683,7 +16737,7 @@ def M4_vpmpyh : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = vpmpyh($Rs32,$Rt32)",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100101110;
@@ -16693,7 +16747,7 @@ def M4_vpmpyh_acc : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 ^= vpmpyh($Rs32,$Rt32)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100111101;
@@ -16704,7 +16758,7 @@ def M4_vrmpyeh_acc_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vrmpyweh($Rss32,$Rtt32)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010001;
@@ -16715,7 +16769,7 @@ def M4_vrmpyeh_acc_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vrmpyweh($Rss32,$Rtt32):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010101;
@@ -16726,7 +16780,7 @@ def M4_vrmpyeh_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vrmpyweh($Rss32,$Rtt32)",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000010;
@@ -16736,7 +16790,7 @@ def M4_vrmpyeh_s1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vrmpyweh($Rss32,$Rtt32):<<1",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000110;
@@ -16746,7 +16800,7 @@ def M4_vrmpyoh_acc_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vrmpywoh($Rss32,$Rtt32)",
-M_tc_3x_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010011;
@@ -16757,7 +16811,7 @@ def M4_vrmpyoh_acc_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vrmpywoh($Rss32,$Rtt32):<<1",
-M_tc_3x_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010111;
@@ -16768,7 +16822,7 @@ def M4_vrmpyoh_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vrmpywoh($Rss32,$Rtt32)",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000001;
@@ -16778,7 +16832,7 @@ def M4_vrmpyoh_s1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vrmpywoh($Rss32,$Rtt32):<<1",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000101;
@@ -16788,7 +16842,7 @@ def M4_xor_and : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 ^= and($Rs32,$Rt32)",
-M_tc_2_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_3c10f809, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101111110;
@@ -16802,7 +16856,7 @@ def M4_xor_andn : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 ^= and($Rs32,~$Rt32)",
-M_tc_2_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_3c10f809, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101111001;
@@ -16816,7 +16870,7 @@ def M4_xor_or : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 ^= or($Rs32,$Rt32)",
-M_tc_2_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_3c10f809, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101111110;
@@ -16830,7 +16884,7 @@ def M4_xor_xacc : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 ^= xor($Rss32,$Rtt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_12702821 {
+tc_3c10f809, TypeS_3op>, Enc_88c16c {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001010100;
@@ -16841,7 +16895,7 @@ def M5_vdmacbsu : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vdmpybsu($Rss32,$Rtt32):sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821, Requires<[HasV5T]> {
+tc_8cb685d9, TypeM>, Enc_88c16c, Requires<[HasV5T]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010001;
@@ -16853,7 +16907,7 @@ def M5_vdmpybsu : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vdmpybsu($Rss32,$Rtt32):sat",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157, Requires<[HasV5T]> {
+tc_8c8041e6, TypeM>, Enc_a56825, Requires<[HasV5T]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000101;
@@ -16864,7 +16918,7 @@ def M5_vmacbsu : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += vmpybsu($Rs32,$Rt32)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100111110;
@@ -16875,7 +16929,7 @@ def M5_vmacbuu : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += vmpybu($Rs32,$Rt32)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100111100;
@@ -16886,7 +16940,7 @@ def M5_vmpybsu : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = vmpybsu($Rs32,$Rt32)",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100101010;
@@ -16896,7 +16950,7 @@ def M5_vmpybuu : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = vmpybu($Rs32,$Rt32)",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100101100;
@@ -16906,7 +16960,7 @@ def M5_vrmacbsu : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vrmpybsu($Rss32,$Rtt32)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010110;
@@ -16917,7 +16971,7 @@ def M5_vrmacbuu : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vrmpybu($Rss32,$Rtt32)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010100;
@@ -16928,7 +16982,7 @@ def M5_vrmpybsu : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vrmpybsu($Rss32,$Rtt32)",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000110;
@@ -16938,7 +16992,7 @@ def M5_vrmpybuu : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vrmpybu($Rss32,$Rtt32)",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000100;
@@ -16948,7 +17002,7 @@ def M6_vabsdiffb : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vabsdiffb($Rtt32,$Rss32)",
-M_tc_2_SLOT23, TypeM>, Enc_11687333, Requires<[HasV62T]> {
+tc_faab1248, TypeM>, Enc_ea23e4, Requires<[HasV62T]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000111;
@@ -16958,7 +17012,7 @@ def M6_vabsdiffub : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vabsdiffub($Rtt32,$Rss32)",
-M_tc_2_SLOT23, TypeM>, Enc_11687333, Requires<[HasV62T]> {
+tc_faab1248, TypeM>, Enc_ea23e4, Requires<[HasV62T]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000101;
@@ -16968,15 +17022,15 @@ def PS_loadrbabs : HInst<
(outs IntRegs:$Rd32),
(ins u32_0Imm:$Ii),
"$Rd32 = memb(#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_1886960, AddrModeRel {
+tc_70cabf66, TypeV2LDST>, Enc_25bef0, AddrModeRel {
let Inst{24-21} = 0b1000;
let Inst{31-27} = 0b01001;
let hasNewValue = 1;
let opNewValue = 0;
let addrMode = Absolute;
let accessSize = ByteAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadrb";
let BaseOpcode = "L4_loadrb_abs";
let isPredicable = 1;
@@ -16991,13 +17045,13 @@ def PS_loadrdabs : HInst<
(outs DoubleRegs:$Rdd32),
(ins u29_3Imm:$Ii),
"$Rdd32 = memd(#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_4975051, AddrModeRel {
+tc_70cabf66, TypeV2LDST>, Enc_509701, AddrModeRel {
let Inst{24-21} = 0b1110;
let Inst{31-27} = 0b01001;
let addrMode = Absolute;
let accessSize = DoubleWordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadrd";
let BaseOpcode = "L4_loadrd_abs";
let isPredicable = 1;
@@ -17012,15 +17066,15 @@ def PS_loadrhabs : HInst<
(outs IntRegs:$Rd32),
(ins u31_1Imm:$Ii),
"$Rd32 = memh(#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_12608570, AddrModeRel {
+tc_70cabf66, TypeV2LDST>, Enc_8df4be, AddrModeRel {
let Inst{24-21} = 0b1010;
let Inst{31-27} = 0b01001;
let hasNewValue = 1;
let opNewValue = 0;
let addrMode = Absolute;
let accessSize = HalfWordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadrh";
let BaseOpcode = "L4_loadrh_abs";
let isPredicable = 1;
@@ -17035,15 +17089,15 @@ def PS_loadriabs : HInst<
(outs IntRegs:$Rd32),
(ins u30_2Imm:$Ii),
"$Rd32 = memw(#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_8814718, AddrModeRel {
+tc_70cabf66, TypeV2LDST>, Enc_4f4ed7, AddrModeRel {
let Inst{24-21} = 0b1100;
let Inst{31-27} = 0b01001;
let hasNewValue = 1;
let opNewValue = 0;
let addrMode = Absolute;
let accessSize = WordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadri";
let BaseOpcode = "L4_loadri_abs";
let isPredicable = 1;
@@ -17058,15 +17112,15 @@ def PS_loadrubabs : HInst<
(outs IntRegs:$Rd32),
(ins u32_0Imm:$Ii),
"$Rd32 = memub(#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_1886960, AddrModeRel {
+tc_70cabf66, TypeV2LDST>, Enc_25bef0, AddrModeRel {
let Inst{24-21} = 0b1001;
let Inst{31-27} = 0b01001;
let hasNewValue = 1;
let opNewValue = 0;
let addrMode = Absolute;
let accessSize = ByteAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadrub";
let BaseOpcode = "L4_loadrub_abs";
let isPredicable = 1;
@@ -17081,15 +17135,15 @@ def PS_loadruhabs : HInst<
(outs IntRegs:$Rd32),
(ins u31_1Imm:$Ii),
"$Rd32 = memuh(#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_12608570, AddrModeRel {
+tc_70cabf66, TypeV2LDST>, Enc_8df4be, AddrModeRel {
let Inst{24-21} = 0b1011;
let Inst{31-27} = 0b01001;
let hasNewValue = 1;
let opNewValue = 0;
let addrMode = Absolute;
let accessSize = HalfWordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadruh";
let BaseOpcode = "L4_loadruh_abs";
let isPredicable = 1;
@@ -17104,7 +17158,7 @@ def PS_storerbabs : HInst<
(outs),
(ins u32_0Imm:$Ii, IntRegs:$Rt32),
"memb(#$Ii) = $Rt32",
-ST_tc_st_SLOT01, TypeV2LDST>, Enc_12395768, AddrModeRel {
+tc_c14739d5, TypeV2LDST>, Enc_1b64fb, AddrModeRel {
let Inst{24-21} = 0b0000;
let Inst{31-27} = 0b01001;
let addrMode = Absolute;
@@ -17126,16 +17180,16 @@ def PS_storerbnewabs : HInst<
(outs),
(ins u32_0Imm:$Ii, IntRegs:$Nt8),
"memb(#$Ii) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeV2LDST>, Enc_4050532, AddrModeRel {
+tc_9e86015f, TypeV2LDST>, Enc_ad1831, AddrModeRel {
let Inst{12-11} = 0b00;
let Inst{24-21} = 0b0101;
let Inst{31-27} = 0b01001;
let addrMode = Absolute;
let accessSize = ByteAccess;
let isNVStore = 1;
+let isNewValue = 1;
let isExtended = 1;
let mayStore = 1;
-let isNewValue = 1;
let CextOpcode = "S2_storerb";
let BaseOpcode = "S2_storerbabs";
let isPredicable = 1;
@@ -17151,7 +17205,7 @@ def PS_storerdabs : HInst<
(outs),
(ins u29_3Imm:$Ii, DoubleRegs:$Rtt32),
"memd(#$Ii) = $Rtt32",
-ST_tc_st_SLOT01, TypeV2LDST>, Enc_11682941, AddrModeRel {
+tc_c14739d5, TypeV2LDST>, Enc_5c124a, AddrModeRel {
let Inst{24-21} = 0b0110;
let Inst{31-27} = 0b01001;
let addrMode = Absolute;
@@ -17172,7 +17226,7 @@ def PS_storerfabs : HInst<
(outs),
(ins u31_1Imm:$Ii, IntRegs:$Rt32),
"memh(#$Ii) = $Rt32.h",
-ST_tc_st_SLOT01, TypeV2LDST>, Enc_1186018, AddrModeRel {
+tc_c14739d5, TypeV2LDST>, Enc_fda92c, AddrModeRel {
let Inst{24-21} = 0b0011;
let Inst{31-27} = 0b01001;
let addrMode = Absolute;
@@ -17193,7 +17247,7 @@ def PS_storerhabs : HInst<
(outs),
(ins u31_1Imm:$Ii, IntRegs:$Rt32),
"memh(#$Ii) = $Rt32",
-ST_tc_st_SLOT01, TypeV2LDST>, Enc_1186018, AddrModeRel {
+tc_c14739d5, TypeV2LDST>, Enc_fda92c, AddrModeRel {
let Inst{24-21} = 0b0010;
let Inst{31-27} = 0b01001;
let addrMode = Absolute;
@@ -17215,16 +17269,16 @@ def PS_storerhnewabs : HInst<
(outs),
(ins u31_1Imm:$Ii, IntRegs:$Nt8),
"memh(#$Ii) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeV2LDST>, Enc_13618890, AddrModeRel {
+tc_9e86015f, TypeV2LDST>, Enc_bc03e5, AddrModeRel {
let Inst{12-11} = 0b01;
let Inst{24-21} = 0b0101;
let Inst{31-27} = 0b01001;
let addrMode = Absolute;
let accessSize = HalfWordAccess;
let isNVStore = 1;
+let isNewValue = 1;
let isExtended = 1;
let mayStore = 1;
-let isNewValue = 1;
let CextOpcode = "S2_storerh";
let BaseOpcode = "S2_storerhabs";
let isPredicable = 1;
@@ -17240,7 +17294,7 @@ def PS_storeriabs : HInst<
(outs),
(ins u30_2Imm:$Ii, IntRegs:$Rt32),
"memw(#$Ii) = $Rt32",
-ST_tc_st_SLOT01, TypeV2LDST>, Enc_15999208, AddrModeRel {
+tc_c14739d5, TypeV2LDST>, Enc_541f26, AddrModeRel {
let Inst{24-21} = 0b0100;
let Inst{31-27} = 0b01001;
let addrMode = Absolute;
@@ -17262,16 +17316,16 @@ def PS_storerinewabs : HInst<
(outs),
(ins u30_2Imm:$Ii, IntRegs:$Nt8),
"memw(#$Ii) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeV2LDST>, Enc_12297800, AddrModeRel {
+tc_9e86015f, TypeV2LDST>, Enc_78cbf0, AddrModeRel {
let Inst{12-11} = 0b10;
let Inst{24-21} = 0b0101;
let Inst{31-27} = 0b01001;
let addrMode = Absolute;
let accessSize = WordAccess;
let isNVStore = 1;
+let isNewValue = 1;
let isExtended = 1;
let mayStore = 1;
-let isNewValue = 1;
let CextOpcode = "S2_storeri";
let BaseOpcode = "S2_storeriabs";
let isPredicable = 1;
@@ -17287,7 +17341,7 @@ def S2_addasl_rrri : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32, u3_0Imm:$Ii),
"$Rd32 = addasl($Rt32,$Rs32,#$Ii)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_3494181 {
+tc_090485bb, TypeS_3op>, Enc_47ef61 {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000100000;
let hasNewValue = 1;
@@ -17298,7 +17352,7 @@ def S2_allocframe : HInst<
(outs),
(ins u11_3Imm:$Ii),
"allocframe(#$Ii)",
-ST_tc_ld_SLOT0, TypeST>, Enc_15830826 {
+tc_0cb867f2, TypeST>, Enc_22c845 {
let Inst{13-11} = 0b000;
let Inst{31-21} = 0b10100000100;
let Inst{20-16} = 0b11101;
@@ -17312,7 +17366,7 @@ def S2_asl_i_p : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, u6_0Imm:$Ii),
"$Rdd32 = asl($Rss32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_4231995 {
+tc_9c18c9a5, TypeS_2op>, Enc_5eac98 {
let Inst{7-5} = 0b010;
let Inst{31-21} = 0b10000000000;
}
@@ -17320,7 +17374,7 @@ def S2_asl_i_p_acc : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, u6_0Imm:$Ii),
"$Rxx32 += asl($Rss32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_8497723 {
+tc_c0cd91a8, TypeS_2op>, Enc_70fb07 {
let Inst{7-5} = 0b110;
let Inst{31-21} = 0b10000010000;
let prefersSlot3 = 1;
@@ -17330,7 +17384,7 @@ def S2_asl_i_p_and : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, u6_0Imm:$Ii),
"$Rxx32 &= asl($Rss32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_8497723 {
+tc_3c10f809, TypeS_2op>, Enc_70fb07 {
let Inst{7-5} = 0b010;
let Inst{31-21} = 0b10000010010;
let prefersSlot3 = 1;
@@ -17340,7 +17394,7 @@ def S2_asl_i_p_nac : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, u6_0Imm:$Ii),
"$Rxx32 -= asl($Rss32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_8497723 {
+tc_c0cd91a8, TypeS_2op>, Enc_70fb07 {
let Inst{7-5} = 0b010;
let Inst{31-21} = 0b10000010000;
let prefersSlot3 = 1;
@@ -17350,7 +17404,7 @@ def S2_asl_i_p_or : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, u6_0Imm:$Ii),
"$Rxx32 |= asl($Rss32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_8497723 {
+tc_3c10f809, TypeS_2op>, Enc_70fb07 {
let Inst{7-5} = 0b110;
let Inst{31-21} = 0b10000010010;
let prefersSlot3 = 1;
@@ -17360,7 +17414,7 @@ def S2_asl_i_p_xacc : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, u6_0Imm:$Ii),
"$Rxx32 ^= asl($Rss32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_8497723 {
+tc_3c10f809, TypeS_2op>, Enc_70fb07 {
let Inst{7-5} = 0b010;
let Inst{31-21} = 0b10000010100;
let prefersSlot3 = 1;
@@ -17370,7 +17424,7 @@ def S2_asl_i_r : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rd32 = asl($Rs32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_2771456 {
+tc_9c18c9a5, TypeS_2op>, Enc_a05677 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001100000;
@@ -17381,7 +17435,7 @@ def S2_asl_i_r_acc : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rx32 += asl($Rs32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_2410156 {
+tc_c0cd91a8, TypeS_2op>, Enc_28a2dc {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001110000;
@@ -17394,7 +17448,7 @@ def S2_asl_i_r_and : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rx32 &= asl($Rs32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_2410156 {
+tc_3c10f809, TypeS_2op>, Enc_28a2dc {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001110010;
@@ -17407,7 +17461,7 @@ def S2_asl_i_r_nac : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rx32 -= asl($Rs32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_2410156 {
+tc_c0cd91a8, TypeS_2op>, Enc_28a2dc {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001110000;
@@ -17420,7 +17474,7 @@ def S2_asl_i_r_or : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rx32 |= asl($Rs32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_2410156 {
+tc_3c10f809, TypeS_2op>, Enc_28a2dc {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001110010;
@@ -17433,19 +17487,20 @@ def S2_asl_i_r_sat : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rd32 = asl($Rs32,#$Ii):sat",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_2771456 {
+tc_47ab9233, TypeS_2op>, Enc_a05677 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001100010;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def S2_asl_i_r_xacc : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rx32 ^= asl($Rs32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_2410156 {
+tc_3c10f809, TypeS_2op>, Enc_28a2dc {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001110100;
@@ -17458,7 +17513,7 @@ def S2_asl_i_vh : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, u4_0Imm:$Ii),
"$Rdd32 = vaslh($Rss32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_2082775 {
+tc_9c18c9a5, TypeS_2op>, Enc_12b6e9 {
let Inst{7-5} = 0b010;
let Inst{13-12} = 0b00;
let Inst{31-21} = 0b10000000100;
@@ -17467,7 +17522,7 @@ def S2_asl_i_vw : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, u5_0Imm:$Ii),
"$Rdd32 = vaslw($Rss32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_13201267 {
+tc_9c18c9a5, TypeS_2op>, Enc_7e5a82 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10000000010;
@@ -17476,7 +17531,7 @@ def S2_asl_r_p : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rdd32 = asl($Rss32,$Rt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_8940892 {
+tc_9c18c9a5, TypeS_3op>, Enc_927852 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000011100;
@@ -17485,7 +17540,7 @@ def S2_asl_r_p_acc : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rxx32 += asl($Rss32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_7912540 {
+tc_c0cd91a8, TypeS_3op>, Enc_1aa186 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001011110;
@@ -17496,7 +17551,7 @@ def S2_asl_r_p_and : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rxx32 &= asl($Rss32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_7912540 {
+tc_3c10f809, TypeS_3op>, Enc_1aa186 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001011010;
@@ -17507,7 +17562,7 @@ def S2_asl_r_p_nac : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rxx32 -= asl($Rss32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_7912540 {
+tc_c0cd91a8, TypeS_3op>, Enc_1aa186 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001011100;
@@ -17518,7 +17573,7 @@ def S2_asl_r_p_or : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rxx32 |= asl($Rss32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_7912540 {
+tc_3c10f809, TypeS_3op>, Enc_1aa186 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001011000;
@@ -17529,7 +17584,7 @@ def S2_asl_r_p_xor : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rxx32 ^= asl($Rss32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_7912540 {
+tc_3c10f809, TypeS_3op>, Enc_1aa186 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001011011;
@@ -17540,7 +17595,7 @@ def S2_asl_r_r : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = asl($Rs32,$Rt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_14071773 {
+tc_9c18c9a5, TypeS_3op>, Enc_5ab2be {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000110010;
@@ -17551,7 +17606,7 @@ def S2_asl_r_r_acc : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += asl($Rs32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_9223889 {
+tc_c0cd91a8, TypeS_3op>, Enc_2ae154 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001100110;
@@ -17564,7 +17619,7 @@ def S2_asl_r_r_and : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 &= asl($Rs32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_9223889 {
+tc_3c10f809, TypeS_3op>, Enc_2ae154 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001100010;
@@ -17577,7 +17632,7 @@ def S2_asl_r_r_nac : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= asl($Rs32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_9223889 {
+tc_c0cd91a8, TypeS_3op>, Enc_2ae154 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001100100;
@@ -17590,7 +17645,7 @@ def S2_asl_r_r_or : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 |= asl($Rs32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_9223889 {
+tc_3c10f809, TypeS_3op>, Enc_2ae154 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001100000;
@@ -17603,19 +17658,20 @@ def S2_asl_r_r_sat : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = asl($Rs32,$Rt32):sat",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_14071773 {
+tc_47ab9233, TypeS_3op>, Enc_5ab2be {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000110000;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def S2_asl_r_vh : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rdd32 = vaslh($Rss32,$Rt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_8940892 {
+tc_9c18c9a5, TypeS_3op>, Enc_927852 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000011010;
@@ -17624,7 +17680,7 @@ def S2_asl_r_vw : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rdd32 = vaslw($Rss32,$Rt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_8940892 {
+tc_9c18c9a5, TypeS_3op>, Enc_927852 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000011000;
@@ -17633,7 +17689,7 @@ def S2_asr_i_p : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, u6_0Imm:$Ii),
"$Rdd32 = asr($Rss32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_4231995 {
+tc_9c18c9a5, TypeS_2op>, Enc_5eac98 {
let Inst{7-5} = 0b000;
let Inst{31-21} = 0b10000000000;
}
@@ -17641,7 +17697,7 @@ def S2_asr_i_p_acc : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, u6_0Imm:$Ii),
"$Rxx32 += asr($Rss32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_8497723 {
+tc_c0cd91a8, TypeS_2op>, Enc_70fb07 {
let Inst{7-5} = 0b100;
let Inst{31-21} = 0b10000010000;
let prefersSlot3 = 1;
@@ -17651,7 +17707,7 @@ def S2_asr_i_p_and : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, u6_0Imm:$Ii),
"$Rxx32 &= asr($Rss32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_8497723 {
+tc_3c10f809, TypeS_2op>, Enc_70fb07 {
let Inst{7-5} = 0b000;
let Inst{31-21} = 0b10000010010;
let prefersSlot3 = 1;
@@ -17661,7 +17717,7 @@ def S2_asr_i_p_nac : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, u6_0Imm:$Ii),
"$Rxx32 -= asr($Rss32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_8497723 {
+tc_c0cd91a8, TypeS_2op>, Enc_70fb07 {
let Inst{7-5} = 0b000;
let Inst{31-21} = 0b10000010000;
let prefersSlot3 = 1;
@@ -17671,7 +17727,7 @@ def S2_asr_i_p_or : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, u6_0Imm:$Ii),
"$Rxx32 |= asr($Rss32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_8497723 {
+tc_3c10f809, TypeS_2op>, Enc_70fb07 {
let Inst{7-5} = 0b100;
let Inst{31-21} = 0b10000010010;
let prefersSlot3 = 1;
@@ -17681,7 +17737,7 @@ def S2_asr_i_p_rnd : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, u6_0Imm:$Ii),
"$Rdd32 = asr($Rss32,#$Ii):rnd",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_4231995, Requires<[HasV5T]> {
+tc_63cd9d2d, TypeS_2op>, Enc_5eac98, Requires<[HasV5T]> {
let Inst{7-5} = 0b111;
let Inst{31-21} = 0b10000000110;
let prefersSlot3 = 1;
@@ -17690,14 +17746,14 @@ def S2_asr_i_p_rnd_goodsyntax : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, u6_0Imm:$Ii),
"$Rdd32 = asrrnd($Rss32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Requires<[HasV5T]> {
+tc_63cd9d2d, TypeS_2op>, Requires<[HasV5T]> {
let isPseudo = 1;
}
def S2_asr_i_r : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rd32 = asr($Rs32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_2771456 {
+tc_9c18c9a5, TypeS_2op>, Enc_a05677 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001100000;
@@ -17708,7 +17764,7 @@ def S2_asr_i_r_acc : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rx32 += asr($Rs32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_2410156 {
+tc_c0cd91a8, TypeS_2op>, Enc_28a2dc {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001110000;
@@ -17721,7 +17777,7 @@ def S2_asr_i_r_and : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rx32 &= asr($Rs32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_2410156 {
+tc_3c10f809, TypeS_2op>, Enc_28a2dc {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001110010;
@@ -17734,7 +17790,7 @@ def S2_asr_i_r_nac : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rx32 -= asr($Rs32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_2410156 {
+tc_c0cd91a8, TypeS_2op>, Enc_28a2dc {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001110000;
@@ -17747,7 +17803,7 @@ def S2_asr_i_r_or : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rx32 |= asr($Rs32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_2410156 {
+tc_3c10f809, TypeS_2op>, Enc_28a2dc {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001110010;
@@ -17760,7 +17816,7 @@ def S2_asr_i_r_rnd : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rd32 = asr($Rs32,#$Ii):rnd",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_2771456 {
+tc_63cd9d2d, TypeS_2op>, Enc_a05677 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001100010;
@@ -17772,7 +17828,7 @@ def S2_asr_i_r_rnd_goodsyntax : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rd32 = asrrnd($Rs32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op> {
+tc_63cd9d2d, TypeS_2op> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -17781,18 +17837,19 @@ def S2_asr_i_svw_trun : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32, u5_0Imm:$Ii),
"$Rd32 = vasrw($Rss32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_2380082 {
+tc_7ca2ea10, TypeS_2op>, Enc_8dec2e {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001000110;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def S2_asr_i_vh : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, u4_0Imm:$Ii),
"$Rdd32 = vasrh($Rss32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_2082775 {
+tc_9c18c9a5, TypeS_2op>, Enc_12b6e9 {
let Inst{7-5} = 0b000;
let Inst{13-12} = 0b00;
let Inst{31-21} = 0b10000000100;
@@ -17801,7 +17858,7 @@ def S2_asr_i_vw : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, u5_0Imm:$Ii),
"$Rdd32 = vasrw($Rss32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_13201267 {
+tc_9c18c9a5, TypeS_2op>, Enc_7e5a82 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10000000010;
@@ -17810,7 +17867,7 @@ def S2_asr_r_p : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rdd32 = asr($Rss32,$Rt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_8940892 {
+tc_9c18c9a5, TypeS_3op>, Enc_927852 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000011100;
@@ -17819,7 +17876,7 @@ def S2_asr_r_p_acc : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rxx32 += asr($Rss32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_7912540 {
+tc_c0cd91a8, TypeS_3op>, Enc_1aa186 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001011110;
@@ -17830,7 +17887,7 @@ def S2_asr_r_p_and : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rxx32 &= asr($Rss32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_7912540 {
+tc_3c10f809, TypeS_3op>, Enc_1aa186 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001011010;
@@ -17841,7 +17898,7 @@ def S2_asr_r_p_nac : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rxx32 -= asr($Rss32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_7912540 {
+tc_c0cd91a8, TypeS_3op>, Enc_1aa186 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001011100;
@@ -17852,7 +17909,7 @@ def S2_asr_r_p_or : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rxx32 |= asr($Rss32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_7912540 {
+tc_3c10f809, TypeS_3op>, Enc_1aa186 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001011000;
@@ -17863,7 +17920,7 @@ def S2_asr_r_p_xor : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rxx32 ^= asr($Rss32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_7912540 {
+tc_3c10f809, TypeS_3op>, Enc_1aa186 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001011011;
@@ -17874,7 +17931,7 @@ def S2_asr_r_r : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = asr($Rs32,$Rt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_14071773 {
+tc_9c18c9a5, TypeS_3op>, Enc_5ab2be {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000110010;
@@ -17885,7 +17942,7 @@ def S2_asr_r_r_acc : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += asr($Rs32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_9223889 {
+tc_c0cd91a8, TypeS_3op>, Enc_2ae154 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001100110;
@@ -17898,7 +17955,7 @@ def S2_asr_r_r_and : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 &= asr($Rs32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_9223889 {
+tc_3c10f809, TypeS_3op>, Enc_2ae154 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001100010;
@@ -17911,7 +17968,7 @@ def S2_asr_r_r_nac : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= asr($Rs32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_9223889 {
+tc_c0cd91a8, TypeS_3op>, Enc_2ae154 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001100100;
@@ -17924,7 +17981,7 @@ def S2_asr_r_r_or : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 |= asr($Rs32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_9223889 {
+tc_3c10f809, TypeS_3op>, Enc_2ae154 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001100000;
@@ -17937,30 +17994,32 @@ def S2_asr_r_r_sat : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = asr($Rs32,$Rt32):sat",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_14071773 {
+tc_47ab9233, TypeS_3op>, Enc_5ab2be {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000110000;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def S2_asr_r_svw_trun : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rd32 = vasrw($Rss32,$Rt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_14287645 {
+tc_7ca2ea10, TypeS_3op>, Enc_3d5b28 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000101000;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def S2_asr_r_vh : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rdd32 = vasrh($Rss32,$Rt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_8940892 {
+tc_9c18c9a5, TypeS_3op>, Enc_927852 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000011010;
@@ -17969,7 +18028,7 @@ def S2_asr_r_vw : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rdd32 = vasrw($Rss32,$Rt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_8940892 {
+tc_9c18c9a5, TypeS_3op>, Enc_927852 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000011000;
@@ -17978,25 +18037,27 @@ def S2_brev : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = brev($Rs32)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_4075554 {
+tc_ab1b5e74, TypeS_2op>, Enc_5e2823 {
let Inst{13-5} = 0b000000110;
let Inst{31-21} = 0b10001100010;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def S2_brevp : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32),
"$Rdd32 = brev($Rss32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_13133231 {
+tc_ab1b5e74, TypeS_2op>, Enc_b9c5fb {
let Inst{13-5} = 0b000000110;
let Inst{31-21} = 0b10000000110;
+let prefersSlot3 = 1;
}
def S2_cabacdecbin : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = decbin($Rss32,$Rtt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_8333157 {
+tc_5d806107, TypeS_3op>, Enc_a56825 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000001110;
@@ -18008,77 +18069,84 @@ def S2_cl0 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = cl0($Rs32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_4075554 {
+tc_ab1b5e74, TypeS_2op>, Enc_5e2823 {
let Inst{13-5} = 0b000000101;
let Inst{31-21} = 0b10001100000;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def S2_cl0p : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32),
"$Rd32 = cl0($Rss32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_3742184 {
+tc_ab1b5e74, TypeS_2op>, Enc_90cd8b {
let Inst{13-5} = 0b000000010;
let Inst{31-21} = 0b10001000010;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def S2_cl1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = cl1($Rs32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_4075554 {
+tc_ab1b5e74, TypeS_2op>, Enc_5e2823 {
let Inst{13-5} = 0b000000110;
let Inst{31-21} = 0b10001100000;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def S2_cl1p : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32),
"$Rd32 = cl1($Rss32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_3742184 {
+tc_ab1b5e74, TypeS_2op>, Enc_90cd8b {
let Inst{13-5} = 0b000000100;
let Inst{31-21} = 0b10001000010;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def S2_clb : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = clb($Rs32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_4075554 {
+tc_ab1b5e74, TypeS_2op>, Enc_5e2823 {
let Inst{13-5} = 0b000000100;
let Inst{31-21} = 0b10001100000;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def S2_clbnorm : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = normamt($Rs32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_4075554 {
+tc_ab1b5e74, TypeS_2op>, Enc_5e2823 {
let Inst{13-5} = 0b000000111;
let Inst{31-21} = 0b10001100000;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def S2_clbp : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32),
"$Rd32 = clb($Rss32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_3742184 {
+tc_ab1b5e74, TypeS_2op>, Enc_90cd8b {
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b10001000010;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def S2_clrbit_i : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rd32 = clrbit($Rs32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_2771456 {
+tc_9c18c9a5, TypeS_2op>, Enc_a05677 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001100110;
@@ -18089,7 +18157,7 @@ def S2_clrbit_r : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = clrbit($Rs32,$Rt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_14071773 {
+tc_9c18c9a5, TypeS_3op>, Enc_5ab2be {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000110100;
@@ -18100,55 +18168,60 @@ def S2_ct0 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = ct0($Rs32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_4075554 {
+tc_ab1b5e74, TypeS_2op>, Enc_5e2823 {
let Inst{13-5} = 0b000000100;
let Inst{31-21} = 0b10001100010;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def S2_ct0p : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32),
"$Rd32 = ct0($Rss32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_3742184 {
+tc_ab1b5e74, TypeS_2op>, Enc_90cd8b {
let Inst{13-5} = 0b000000010;
let Inst{31-21} = 0b10001000111;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def S2_ct1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = ct1($Rs32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_4075554 {
+tc_ab1b5e74, TypeS_2op>, Enc_5e2823 {
let Inst{13-5} = 0b000000101;
let Inst{31-21} = 0b10001100010;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def S2_ct1p : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32),
"$Rd32 = ct1($Rss32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_3742184 {
+tc_ab1b5e74, TypeS_2op>, Enc_90cd8b {
let Inst{13-5} = 0b000000100;
let Inst{31-21} = 0b10001000111;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def S2_deinterleave : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32),
"$Rdd32 = deinterleave($Rss32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_13133231 {
+tc_ab1b5e74, TypeS_2op>, Enc_b9c5fb {
let Inst{13-5} = 0b000000100;
let Inst{31-21} = 0b10000000110;
+let prefersSlot3 = 1;
}
def S2_extractu : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, u5_0Imm:$Ii, u5_0Imm:$II),
"$Rd32 = extractu($Rs32,#$Ii,#$II)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_11930928 {
+tc_c0cd91a8, TypeS_2op>, Enc_b388cf {
let Inst{13-13} = 0b0;
let Inst{31-23} = 0b100011010;
let hasNewValue = 1;
@@ -18159,7 +18232,7 @@ def S2_extractu_rp : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, DoubleRegs:$Rtt32),
"$Rd32 = extractu($Rs32,$Rtt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_15472748 {
+tc_87601822, TypeS_3op>, Enc_e07374 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001001000;
@@ -18171,7 +18244,7 @@ def S2_extractup : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, u6_0Imm:$Ii, u6_0Imm:$II),
"$Rdd32 = extractu($Rss32,#$Ii,#$II)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_9894557 {
+tc_c0cd91a8, TypeS_2op>, Enc_b84c4c {
let Inst{31-24} = 0b10000001;
let prefersSlot3 = 1;
}
@@ -18179,7 +18252,7 @@ def S2_extractup_rp : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = extractu($Rss32,$Rtt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_8333157 {
+tc_87601822, TypeS_3op>, Enc_a56825 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000001000;
@@ -18189,56 +18262,61 @@ def S2_insert : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u5_0Imm:$Ii, u5_0Imm:$II),
"$Rx32 = insert($Rs32,#$Ii,#$II)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_2880796 {
+tc_d95f4e98, TypeS_2op>, Enc_a1e29d {
let Inst{13-13} = 0b0;
let Inst{31-23} = 0b100011110;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Constraints = "$Rx32 = $Rx32in";
}
def S2_insert_rp : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, DoubleRegs:$Rtt32),
"$Rx32 = insert($Rs32,$Rtt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_16311032 {
+tc_3c10f809, TypeS_3op>, Enc_179b35 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001000000;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Constraints = "$Rx32 = $Rx32in";
}
def S2_insertp : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, u6_0Imm:$Ii, u6_0Imm:$II),
"$Rxx32 = insert($Rss32,#$Ii,#$II)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_631197 {
+tc_d95f4e98, TypeS_2op>, Enc_143a3c {
let Inst{31-24} = 0b10000011;
+let prefersSlot3 = 1;
let Constraints = "$Rxx32 = $Rxx32in";
}
def S2_insertp_rp : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 = insert($Rss32,$Rtt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_12702821 {
+tc_3c10f809, TypeS_3op>, Enc_88c16c {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001010000;
+let prefersSlot3 = 1;
let Constraints = "$Rxx32 = $Rxx32in";
}
def S2_interleave : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32),
"$Rdd32 = interleave($Rss32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_13133231 {
+tc_ab1b5e74, TypeS_2op>, Enc_b9c5fb {
let Inst{13-5} = 0b000000101;
let Inst{31-21} = 0b10000000110;
+let prefersSlot3 = 1;
}
def S2_lfsp : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = lfs($Rss32,$Rtt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_8333157 {
+tc_87601822, TypeS_3op>, Enc_a56825 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000001100;
@@ -18248,7 +18326,7 @@ def S2_lsl_r_p : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rdd32 = lsl($Rss32,$Rt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_8940892 {
+tc_9c18c9a5, TypeS_3op>, Enc_927852 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000011100;
@@ -18257,7 +18335,7 @@ def S2_lsl_r_p_acc : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rxx32 += lsl($Rss32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_7912540 {
+tc_c0cd91a8, TypeS_3op>, Enc_1aa186 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001011110;
@@ -18268,7 +18346,7 @@ def S2_lsl_r_p_and : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rxx32 &= lsl($Rss32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_7912540 {
+tc_3c10f809, TypeS_3op>, Enc_1aa186 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001011010;
@@ -18279,7 +18357,7 @@ def S2_lsl_r_p_nac : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rxx32 -= lsl($Rss32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_7912540 {
+tc_c0cd91a8, TypeS_3op>, Enc_1aa186 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001011100;
@@ -18290,7 +18368,7 @@ def S2_lsl_r_p_or : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rxx32 |= lsl($Rss32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_7912540 {
+tc_3c10f809, TypeS_3op>, Enc_1aa186 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001011000;
@@ -18301,7 +18379,7 @@ def S2_lsl_r_p_xor : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rxx32 ^= lsl($Rss32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_7912540 {
+tc_3c10f809, TypeS_3op>, Enc_1aa186 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001011011;
@@ -18312,7 +18390,7 @@ def S2_lsl_r_r : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = lsl($Rs32,$Rt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_14071773 {
+tc_9c18c9a5, TypeS_3op>, Enc_5ab2be {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000110010;
@@ -18323,7 +18401,7 @@ def S2_lsl_r_r_acc : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += lsl($Rs32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_9223889 {
+tc_c0cd91a8, TypeS_3op>, Enc_2ae154 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001100110;
@@ -18336,7 +18414,7 @@ def S2_lsl_r_r_and : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 &= lsl($Rs32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_9223889 {
+tc_3c10f809, TypeS_3op>, Enc_2ae154 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001100010;
@@ -18349,7 +18427,7 @@ def S2_lsl_r_r_nac : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= lsl($Rs32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_9223889 {
+tc_c0cd91a8, TypeS_3op>, Enc_2ae154 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001100100;
@@ -18362,7 +18440,7 @@ def S2_lsl_r_r_or : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 |= lsl($Rs32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_9223889 {
+tc_3c10f809, TypeS_3op>, Enc_2ae154 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001100000;
@@ -18375,7 +18453,7 @@ def S2_lsl_r_vh : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rdd32 = vlslh($Rss32,$Rt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_8940892 {
+tc_9c18c9a5, TypeS_3op>, Enc_927852 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000011010;
@@ -18384,7 +18462,7 @@ def S2_lsl_r_vw : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rdd32 = vlslw($Rss32,$Rt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_8940892 {
+tc_9c18c9a5, TypeS_3op>, Enc_927852 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000011000;
@@ -18393,7 +18471,7 @@ def S2_lsr_i_p : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, u6_0Imm:$Ii),
"$Rdd32 = lsr($Rss32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_4231995 {
+tc_9c18c9a5, TypeS_2op>, Enc_5eac98 {
let Inst{7-5} = 0b001;
let Inst{31-21} = 0b10000000000;
}
@@ -18401,7 +18479,7 @@ def S2_lsr_i_p_acc : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, u6_0Imm:$Ii),
"$Rxx32 += lsr($Rss32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_8497723 {
+tc_c0cd91a8, TypeS_2op>, Enc_70fb07 {
let Inst{7-5} = 0b101;
let Inst{31-21} = 0b10000010000;
let prefersSlot3 = 1;
@@ -18411,7 +18489,7 @@ def S2_lsr_i_p_and : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, u6_0Imm:$Ii),
"$Rxx32 &= lsr($Rss32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_8497723 {
+tc_3c10f809, TypeS_2op>, Enc_70fb07 {
let Inst{7-5} = 0b001;
let Inst{31-21} = 0b10000010010;
let prefersSlot3 = 1;
@@ -18421,7 +18499,7 @@ def S2_lsr_i_p_nac : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, u6_0Imm:$Ii),
"$Rxx32 -= lsr($Rss32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_8497723 {
+tc_c0cd91a8, TypeS_2op>, Enc_70fb07 {
let Inst{7-5} = 0b001;
let Inst{31-21} = 0b10000010000;
let prefersSlot3 = 1;
@@ -18431,7 +18509,7 @@ def S2_lsr_i_p_or : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, u6_0Imm:$Ii),
"$Rxx32 |= lsr($Rss32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_8497723 {
+tc_3c10f809, TypeS_2op>, Enc_70fb07 {
let Inst{7-5} = 0b101;
let Inst{31-21} = 0b10000010010;
let prefersSlot3 = 1;
@@ -18441,7 +18519,7 @@ def S2_lsr_i_p_xacc : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, u6_0Imm:$Ii),
"$Rxx32 ^= lsr($Rss32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_8497723 {
+tc_3c10f809, TypeS_2op>, Enc_70fb07 {
let Inst{7-5} = 0b001;
let Inst{31-21} = 0b10000010100;
let prefersSlot3 = 1;
@@ -18451,7 +18529,7 @@ def S2_lsr_i_r : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rd32 = lsr($Rs32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_2771456 {
+tc_9c18c9a5, TypeS_2op>, Enc_a05677 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001100000;
@@ -18462,7 +18540,7 @@ def S2_lsr_i_r_acc : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rx32 += lsr($Rs32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_2410156 {
+tc_c0cd91a8, TypeS_2op>, Enc_28a2dc {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001110000;
@@ -18475,7 +18553,7 @@ def S2_lsr_i_r_and : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rx32 &= lsr($Rs32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_2410156 {
+tc_3c10f809, TypeS_2op>, Enc_28a2dc {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001110010;
@@ -18488,7 +18566,7 @@ def S2_lsr_i_r_nac : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rx32 -= lsr($Rs32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_2410156 {
+tc_c0cd91a8, TypeS_2op>, Enc_28a2dc {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001110000;
@@ -18501,7 +18579,7 @@ def S2_lsr_i_r_or : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rx32 |= lsr($Rs32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_2410156 {
+tc_3c10f809, TypeS_2op>, Enc_28a2dc {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001110010;
@@ -18514,7 +18592,7 @@ def S2_lsr_i_r_xacc : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rx32 ^= lsr($Rs32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_2410156 {
+tc_3c10f809, TypeS_2op>, Enc_28a2dc {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001110100;
@@ -18527,7 +18605,7 @@ def S2_lsr_i_vh : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, u4_0Imm:$Ii),
"$Rdd32 = vlsrh($Rss32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_2082775 {
+tc_9c18c9a5, TypeS_2op>, Enc_12b6e9 {
let Inst{7-5} = 0b001;
let Inst{13-12} = 0b00;
let Inst{31-21} = 0b10000000100;
@@ -18536,7 +18614,7 @@ def S2_lsr_i_vw : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, u5_0Imm:$Ii),
"$Rdd32 = vlsrw($Rss32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_13201267 {
+tc_9c18c9a5, TypeS_2op>, Enc_7e5a82 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10000000010;
@@ -18545,7 +18623,7 @@ def S2_lsr_r_p : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rdd32 = lsr($Rss32,$Rt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_8940892 {
+tc_9c18c9a5, TypeS_3op>, Enc_927852 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000011100;
@@ -18554,7 +18632,7 @@ def S2_lsr_r_p_acc : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rxx32 += lsr($Rss32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_7912540 {
+tc_c0cd91a8, TypeS_3op>, Enc_1aa186 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001011110;
@@ -18565,7 +18643,7 @@ def S2_lsr_r_p_and : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rxx32 &= lsr($Rss32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_7912540 {
+tc_3c10f809, TypeS_3op>, Enc_1aa186 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001011010;
@@ -18576,7 +18654,7 @@ def S2_lsr_r_p_nac : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rxx32 -= lsr($Rss32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_7912540 {
+tc_c0cd91a8, TypeS_3op>, Enc_1aa186 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001011100;
@@ -18587,7 +18665,7 @@ def S2_lsr_r_p_or : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rxx32 |= lsr($Rss32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_7912540 {
+tc_3c10f809, TypeS_3op>, Enc_1aa186 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001011000;
@@ -18598,7 +18676,7 @@ def S2_lsr_r_p_xor : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rxx32 ^= lsr($Rss32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_7912540 {
+tc_3c10f809, TypeS_3op>, Enc_1aa186 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001011011;
@@ -18609,7 +18687,7 @@ def S2_lsr_r_r : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = lsr($Rs32,$Rt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_14071773 {
+tc_9c18c9a5, TypeS_3op>, Enc_5ab2be {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000110010;
@@ -18620,7 +18698,7 @@ def S2_lsr_r_r_acc : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += lsr($Rs32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_9223889 {
+tc_c0cd91a8, TypeS_3op>, Enc_2ae154 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001100110;
@@ -18633,7 +18711,7 @@ def S2_lsr_r_r_and : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 &= lsr($Rs32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_9223889 {
+tc_3c10f809, TypeS_3op>, Enc_2ae154 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001100010;
@@ -18646,7 +18724,7 @@ def S2_lsr_r_r_nac : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= lsr($Rs32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_9223889 {
+tc_c0cd91a8, TypeS_3op>, Enc_2ae154 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001100100;
@@ -18659,7 +18737,7 @@ def S2_lsr_r_r_or : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 |= lsr($Rs32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_9223889 {
+tc_3c10f809, TypeS_3op>, Enc_2ae154 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001100000;
@@ -18672,7 +18750,7 @@ def S2_lsr_r_vh : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rdd32 = vlsrh($Rss32,$Rt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_8940892 {
+tc_9c18c9a5, TypeS_3op>, Enc_927852 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000011010;
@@ -18681,7 +18759,7 @@ def S2_lsr_r_vw : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rdd32 = vlsrw($Rss32,$Rt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_8940892 {
+tc_9c18c9a5, TypeS_3op>, Enc_927852 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000011000;
@@ -18690,7 +18768,7 @@ def S2_packhl : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = packhl($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_1997594 {
+tc_548f402d, TypeALU32_3op>, Enc_be32a5 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110101100;
@@ -18700,7 +18778,7 @@ def S2_parityp : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rd32 = parity($Rss32,$Rtt32)",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_9277990 {
+tc_87601822, TypeALU64>, Enc_d2216a {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010000000;
@@ -18712,7 +18790,7 @@ def S2_pstorerbf_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u32_0Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4) memb($Rs32+#$Ii) = $Rt32",
-V2LDST_tc_st_SLOT01, TypeV2LDST>, Enc_14044877, AddrModeRel {
+tc_3d905451, TypeV2LDST>, Enc_da8d43, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{31-21} = 0b01000100000;
let isPredicated = 1;
@@ -18734,7 +18812,7 @@ def S2_pstorerbf_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_0Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4) memb($Rx32++#$Ii) = $Rt32",
-ST_tc_st_pi_SLOT01, TypeST>, Enc_8065534, AddrModeRel {
+tc_9b73d261, TypeST>, Enc_cc449f, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b1;
@@ -18752,7 +18830,7 @@ def S2_pstorerbf_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32),
"if (!$Pv4) memb($Rs32) = $Rt32",
-PSEUDO, TypeMAPPING> {
+tc_3d905451, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -18760,7 +18838,7 @@ def S2_pstorerbfnew_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_0Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4.new) memb($Rx32++#$Ii) = $Rt32",
-ST_tc_st_pi_SLOT01, TypeST>, Enc_8065534, AddrModeRel {
+tc_7675c0e9, TypeST>, Enc_cc449f, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
@@ -18779,7 +18857,7 @@ def S2_pstorerbnewf_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u32_0Imm:$Ii, IntRegs:$Nt8),
"if (!$Pv4) memb($Rs32+#$Ii) = $Nt8.new",
-V2LDST_tc_st_SLOT0, TypeV2LDST>, Enc_1737833, AddrModeRel {
+tc_9da3628f, TypeV2LDST>, Enc_585242, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b01000100101;
@@ -18788,8 +18866,8 @@ let isPredicatedFalse = 1;
let addrMode = BaseImmOffset;
let accessSize = ByteAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerb";
let InputType = "imm";
let BaseOpcode = "S2_storerb_io";
@@ -18804,7 +18882,7 @@ def S2_pstorerbnewf_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_0Imm:$Ii, IntRegs:$Nt8),
"if (!$Pv4) memb($Rx32++#$Ii) = $Nt8.new",
-ST_tc_st_pi_SLOT0, TypeST>, Enc_2813446, AddrModeRel {
+tc_e2480a7f, TypeST>, Enc_52a5dd, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b0;
let Inst{13-11} = 0b100;
@@ -18814,8 +18892,8 @@ let isPredicatedFalse = 1;
let addrMode = PostInc;
let accessSize = ByteAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerb";
let BaseOpcode = "S2_storerb_pi";
let opNewValue = 4;
@@ -18825,7 +18903,7 @@ def S2_pstorerbnewf_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Nt8),
"if (!$Pv4) memb($Rs32) = $Nt8.new",
-PSEUDO, TypeMAPPING> {
+tc_9da3628f, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
let opNewValue = 2;
@@ -18834,7 +18912,7 @@ def S2_pstorerbnewfnew_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_0Imm:$Ii, IntRegs:$Nt8),
"if (!$Pv4.new) memb($Rx32++#$Ii) = $Nt8.new",
-ST_tc_st_pi_SLOT0, TypeST>, Enc_2813446, AddrModeRel {
+tc_8fab9ac3, TypeST>, Enc_52a5dd, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b1;
let Inst{13-11} = 0b100;
@@ -18845,8 +18923,8 @@ let addrMode = PostInc;
let accessSize = ByteAccess;
let isNVStore = 1;
let isPredicatedNew = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerb";
let BaseOpcode = "S2_storerb_pi";
let opNewValue = 4;
@@ -18856,7 +18934,7 @@ def S2_pstorerbnewt_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u32_0Imm:$Ii, IntRegs:$Nt8),
"if ($Pv4) memb($Rs32+#$Ii) = $Nt8.new",
-V2LDST_tc_st_SLOT0, TypeV2LDST>, Enc_1737833, AddrModeRel {
+tc_9da3628f, TypeV2LDST>, Enc_585242, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b01000000101;
@@ -18864,8 +18942,8 @@ let isPredicated = 1;
let addrMode = BaseImmOffset;
let accessSize = ByteAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerb";
let InputType = "imm";
let BaseOpcode = "S2_storerb_io";
@@ -18880,7 +18958,7 @@ def S2_pstorerbnewt_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_0Imm:$Ii, IntRegs:$Nt8),
"if ($Pv4) memb($Rx32++#$Ii) = $Nt8.new",
-ST_tc_st_pi_SLOT0, TypeST>, Enc_2813446, AddrModeRel {
+tc_e2480a7f, TypeST>, Enc_52a5dd, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b0;
let Inst{13-11} = 0b100;
@@ -18889,8 +18967,8 @@ let isPredicated = 1;
let addrMode = PostInc;
let accessSize = ByteAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerb";
let BaseOpcode = "S2_storerb_pi";
let opNewValue = 4;
@@ -18900,7 +18978,7 @@ def S2_pstorerbnewt_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Nt8),
"if ($Pv4) memb($Rs32) = $Nt8.new",
-PSEUDO, TypeMAPPING> {
+tc_9da3628f, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
let opNewValue = 2;
@@ -18909,7 +18987,7 @@ def S2_pstorerbnewtnew_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_0Imm:$Ii, IntRegs:$Nt8),
"if ($Pv4.new) memb($Rx32++#$Ii) = $Nt8.new",
-ST_tc_st_pi_SLOT0, TypeST>, Enc_2813446, AddrModeRel {
+tc_8fab9ac3, TypeST>, Enc_52a5dd, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b1;
let Inst{13-11} = 0b100;
@@ -18919,8 +18997,8 @@ let addrMode = PostInc;
let accessSize = ByteAccess;
let isNVStore = 1;
let isPredicatedNew = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerb";
let BaseOpcode = "S2_storerb_pi";
let opNewValue = 4;
@@ -18930,7 +19008,7 @@ def S2_pstorerbt_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u32_0Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4) memb($Rs32+#$Ii) = $Rt32",
-V2LDST_tc_st_SLOT01, TypeV2LDST>, Enc_14044877, AddrModeRel {
+tc_3d905451, TypeV2LDST>, Enc_da8d43, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{31-21} = 0b01000000000;
let isPredicated = 1;
@@ -18951,7 +19029,7 @@ def S2_pstorerbt_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_0Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4) memb($Rx32++#$Ii) = $Rt32",
-ST_tc_st_pi_SLOT01, TypeST>, Enc_8065534, AddrModeRel {
+tc_9b73d261, TypeST>, Enc_cc449f, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b1;
@@ -18968,7 +19046,7 @@ def S2_pstorerbt_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32),
"if ($Pv4) memb($Rs32) = $Rt32",
-PSEUDO, TypeMAPPING> {
+tc_3d905451, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -18976,7 +19054,7 @@ def S2_pstorerbtnew_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_0Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4.new) memb($Rx32++#$Ii) = $Rt32",
-ST_tc_st_pi_SLOT01, TypeST>, Enc_8065534, AddrModeRel {
+tc_7675c0e9, TypeST>, Enc_cc449f, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
@@ -18994,7 +19072,7 @@ def S2_pstorerdf_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u29_3Imm:$Ii, DoubleRegs:$Rtt32),
"if (!$Pv4) memd($Rs32+#$Ii) = $Rtt32",
-V2LDST_tc_st_SLOT01, TypeV2LDST>, Enc_11049656, AddrModeRel {
+tc_3d905451, TypeV2LDST>, Enc_57a33e, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{31-21} = 0b01000100110;
let isPredicated = 1;
@@ -19015,7 +19093,7 @@ def S2_pstorerdf_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_3Imm:$Ii, DoubleRegs:$Rtt32),
"if (!$Pv4) memd($Rx32++#$Ii) = $Rtt32",
-ST_tc_st_pi_SLOT01, TypeST>, Enc_11959851, AddrModeRel {
+tc_9b73d261, TypeST>, Enc_9a33d5, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b1;
@@ -19033,7 +19111,7 @@ def S2_pstorerdf_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, DoubleRegs:$Rtt32),
"if (!$Pv4) memd($Rs32) = $Rtt32",
-PSEUDO, TypeMAPPING> {
+tc_3d905451, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -19041,7 +19119,7 @@ def S2_pstorerdfnew_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_3Imm:$Ii, DoubleRegs:$Rtt32),
"if (!$Pv4.new) memd($Rx32++#$Ii) = $Rtt32",
-ST_tc_st_pi_SLOT01, TypeST>, Enc_11959851, AddrModeRel {
+tc_7675c0e9, TypeST>, Enc_9a33d5, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
@@ -19060,7 +19138,7 @@ def S2_pstorerdt_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u29_3Imm:$Ii, DoubleRegs:$Rtt32),
"if ($Pv4) memd($Rs32+#$Ii) = $Rtt32",
-V2LDST_tc_st_SLOT01, TypeV2LDST>, Enc_11049656, AddrModeRel {
+tc_3d905451, TypeV2LDST>, Enc_57a33e, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{31-21} = 0b01000000110;
let isPredicated = 1;
@@ -19080,7 +19158,7 @@ def S2_pstorerdt_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_3Imm:$Ii, DoubleRegs:$Rtt32),
"if ($Pv4) memd($Rx32++#$Ii) = $Rtt32",
-ST_tc_st_pi_SLOT01, TypeST>, Enc_11959851, AddrModeRel {
+tc_9b73d261, TypeST>, Enc_9a33d5, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b1;
@@ -19097,7 +19175,7 @@ def S2_pstorerdt_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, DoubleRegs:$Rtt32),
"if ($Pv4) memd($Rs32) = $Rtt32",
-PSEUDO, TypeMAPPING> {
+tc_3d905451, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -19105,7 +19183,7 @@ def S2_pstorerdtnew_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_3Imm:$Ii, DoubleRegs:$Rtt32),
"if ($Pv4.new) memd($Rx32++#$Ii) = $Rtt32",
-ST_tc_st_pi_SLOT01, TypeST>, Enc_11959851, AddrModeRel {
+tc_7675c0e9, TypeST>, Enc_9a33d5, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
@@ -19123,7 +19201,7 @@ def S2_pstorerff_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u31_1Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4) memh($Rs32+#$Ii) = $Rt32.h",
-V2LDST_tc_st_SLOT01, TypeV2LDST>, Enc_10979813, AddrModeRel {
+tc_3d905451, TypeV2LDST>, Enc_e8c45e, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{31-21} = 0b01000100011;
let isPredicated = 1;
@@ -19144,7 +19222,7 @@ def S2_pstorerff_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_1Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4) memh($Rx32++#$Ii) = $Rt32.h",
-ST_tc_st_pi_SLOT01, TypeST>, Enc_11065510, AddrModeRel {
+tc_9b73d261, TypeST>, Enc_b886fd, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b1;
@@ -19162,7 +19240,7 @@ def S2_pstorerff_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32),
"if (!$Pv4) memh($Rs32) = $Rt32.h",
-PSEUDO, TypeMAPPING> {
+tc_3d905451, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -19170,7 +19248,7 @@ def S2_pstorerffnew_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_1Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4.new) memh($Rx32++#$Ii) = $Rt32.h",
-ST_tc_st_pi_SLOT01, TypeST>, Enc_11065510, AddrModeRel {
+tc_7675c0e9, TypeST>, Enc_b886fd, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
@@ -19189,7 +19267,7 @@ def S2_pstorerft_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u31_1Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4) memh($Rs32+#$Ii) = $Rt32.h",
-V2LDST_tc_st_SLOT01, TypeV2LDST>, Enc_10979813, AddrModeRel {
+tc_3d905451, TypeV2LDST>, Enc_e8c45e, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{31-21} = 0b01000000011;
let isPredicated = 1;
@@ -19209,7 +19287,7 @@ def S2_pstorerft_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_1Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4) memh($Rx32++#$Ii) = $Rt32.h",
-ST_tc_st_pi_SLOT01, TypeST>, Enc_11065510, AddrModeRel {
+tc_9b73d261, TypeST>, Enc_b886fd, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b1;
@@ -19226,7 +19304,7 @@ def S2_pstorerft_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32),
"if ($Pv4) memh($Rs32) = $Rt32.h",
-PSEUDO, TypeMAPPING> {
+tc_3d905451, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -19234,7 +19312,7 @@ def S2_pstorerftnew_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_1Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4.new) memh($Rx32++#$Ii) = $Rt32.h",
-ST_tc_st_pi_SLOT01, TypeST>, Enc_11065510, AddrModeRel {
+tc_7675c0e9, TypeST>, Enc_b886fd, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
@@ -19252,7 +19330,7 @@ def S2_pstorerhf_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u31_1Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4) memh($Rs32+#$Ii) = $Rt32",
-V2LDST_tc_st_SLOT01, TypeV2LDST>, Enc_10979813, AddrModeRel {
+tc_3d905451, TypeV2LDST>, Enc_e8c45e, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{31-21} = 0b01000100010;
let isPredicated = 1;
@@ -19274,7 +19352,7 @@ def S2_pstorerhf_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_1Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4) memh($Rx32++#$Ii) = $Rt32",
-ST_tc_st_pi_SLOT01, TypeST>, Enc_11065510, AddrModeRel {
+tc_9b73d261, TypeST>, Enc_b886fd, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b1;
@@ -19292,7 +19370,7 @@ def S2_pstorerhf_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32),
"if (!$Pv4) memh($Rs32) = $Rt32",
-PSEUDO, TypeMAPPING> {
+tc_3d905451, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -19300,7 +19378,7 @@ def S2_pstorerhfnew_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_1Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4.new) memh($Rx32++#$Ii) = $Rt32",
-ST_tc_st_pi_SLOT01, TypeST>, Enc_11065510, AddrModeRel {
+tc_7675c0e9, TypeST>, Enc_b886fd, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
@@ -19319,7 +19397,7 @@ def S2_pstorerhnewf_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u31_1Imm:$Ii, IntRegs:$Nt8),
"if (!$Pv4) memh($Rs32+#$Ii) = $Nt8.new",
-V2LDST_tc_st_SLOT0, TypeV2LDST>, Enc_6154421, AddrModeRel {
+tc_9da3628f, TypeV2LDST>, Enc_f44229, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{12-11} = 0b01;
let Inst{31-21} = 0b01000100101;
@@ -19328,8 +19406,8 @@ let isPredicatedFalse = 1;
let addrMode = BaseImmOffset;
let accessSize = HalfWordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerh";
let InputType = "imm";
let BaseOpcode = "S2_storerh_io";
@@ -19344,7 +19422,7 @@ def S2_pstorerhnewf_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_1Imm:$Ii, IntRegs:$Nt8),
"if (!$Pv4) memh($Rx32++#$Ii) = $Nt8.new",
-ST_tc_st_pi_SLOT0, TypeST>, Enc_3813442, AddrModeRel {
+tc_e2480a7f, TypeST>, Enc_31aa6a, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b0;
let Inst{13-11} = 0b101;
@@ -19354,8 +19432,8 @@ let isPredicatedFalse = 1;
let addrMode = PostInc;
let accessSize = HalfWordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerh";
let BaseOpcode = "S2_storerh_pi";
let opNewValue = 4;
@@ -19365,7 +19443,7 @@ def S2_pstorerhnewf_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Nt8),
"if (!$Pv4) memh($Rs32) = $Nt8.new",
-PSEUDO, TypeMAPPING> {
+tc_9da3628f, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
let opNewValue = 2;
@@ -19374,7 +19452,7 @@ def S2_pstorerhnewfnew_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_1Imm:$Ii, IntRegs:$Nt8),
"if (!$Pv4.new) memh($Rx32++#$Ii) = $Nt8.new",
-ST_tc_st_pi_SLOT0, TypeST>, Enc_3813442, AddrModeRel {
+tc_8fab9ac3, TypeST>, Enc_31aa6a, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b1;
let Inst{13-11} = 0b101;
@@ -19385,8 +19463,8 @@ let addrMode = PostInc;
let accessSize = HalfWordAccess;
let isNVStore = 1;
let isPredicatedNew = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerh";
let BaseOpcode = "S2_storerh_pi";
let opNewValue = 4;
@@ -19396,7 +19474,7 @@ def S2_pstorerhnewt_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u31_1Imm:$Ii, IntRegs:$Nt8),
"if ($Pv4) memh($Rs32+#$Ii) = $Nt8.new",
-V2LDST_tc_st_SLOT0, TypeV2LDST>, Enc_6154421, AddrModeRel {
+tc_9da3628f, TypeV2LDST>, Enc_f44229, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{12-11} = 0b01;
let Inst{31-21} = 0b01000000101;
@@ -19404,8 +19482,8 @@ let isPredicated = 1;
let addrMode = BaseImmOffset;
let accessSize = HalfWordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerh";
let InputType = "imm";
let BaseOpcode = "S2_storerh_io";
@@ -19420,7 +19498,7 @@ def S2_pstorerhnewt_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_1Imm:$Ii, IntRegs:$Nt8),
"if ($Pv4) memh($Rx32++#$Ii) = $Nt8.new",
-ST_tc_st_pi_SLOT0, TypeST>, Enc_3813442, AddrModeRel {
+tc_e2480a7f, TypeST>, Enc_31aa6a, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b0;
let Inst{13-11} = 0b101;
@@ -19429,8 +19507,8 @@ let isPredicated = 1;
let addrMode = PostInc;
let accessSize = HalfWordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerh";
let BaseOpcode = "S2_storerh_pi";
let opNewValue = 4;
@@ -19440,7 +19518,7 @@ def S2_pstorerhnewt_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Nt8),
"if ($Pv4) memh($Rs32) = $Nt8.new",
-PSEUDO, TypeMAPPING> {
+tc_9da3628f, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
let opNewValue = 2;
@@ -19449,7 +19527,7 @@ def S2_pstorerhnewtnew_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_1Imm:$Ii, IntRegs:$Nt8),
"if ($Pv4.new) memh($Rx32++#$Ii) = $Nt8.new",
-ST_tc_st_pi_SLOT0, TypeST>, Enc_3813442, AddrModeRel {
+tc_8fab9ac3, TypeST>, Enc_31aa6a, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b1;
let Inst{13-11} = 0b101;
@@ -19459,8 +19537,8 @@ let addrMode = PostInc;
let accessSize = HalfWordAccess;
let isNVStore = 1;
let isPredicatedNew = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerh";
let BaseOpcode = "S2_storerh_pi";
let opNewValue = 4;
@@ -19470,7 +19548,7 @@ def S2_pstorerht_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u31_1Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4) memh($Rs32+#$Ii) = $Rt32",
-V2LDST_tc_st_SLOT01, TypeV2LDST>, Enc_10979813, AddrModeRel {
+tc_3d905451, TypeV2LDST>, Enc_e8c45e, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{31-21} = 0b01000000010;
let isPredicated = 1;
@@ -19491,7 +19569,7 @@ def S2_pstorerht_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_1Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4) memh($Rx32++#$Ii) = $Rt32",
-ST_tc_st_pi_SLOT01, TypeST>, Enc_11065510, AddrModeRel {
+tc_9b73d261, TypeST>, Enc_b886fd, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b1;
@@ -19508,7 +19586,7 @@ def S2_pstorerht_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32),
"if ($Pv4) memh($Rs32) = $Rt32",
-PSEUDO, TypeMAPPING> {
+tc_3d905451, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -19516,7 +19594,7 @@ def S2_pstorerhtnew_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_1Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4.new) memh($Rx32++#$Ii) = $Rt32",
-ST_tc_st_pi_SLOT01, TypeST>, Enc_11065510, AddrModeRel {
+tc_7675c0e9, TypeST>, Enc_b886fd, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
@@ -19534,7 +19612,7 @@ def S2_pstorerif_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u30_2Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4) memw($Rs32+#$Ii) = $Rt32",
-V2LDST_tc_st_SLOT01, TypeV2LDST>, Enc_8225953, AddrModeRel {
+tc_3d905451, TypeV2LDST>, Enc_397f23, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{31-21} = 0b01000100100;
let isPredicated = 1;
@@ -19556,7 +19634,7 @@ def S2_pstorerif_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_2Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4) memw($Rx32++#$Ii) = $Rt32",
-ST_tc_st_pi_SLOT01, TypeST>, Enc_10065510, AddrModeRel {
+tc_9b73d261, TypeST>, Enc_7eaeb6, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b1;
@@ -19574,7 +19652,7 @@ def S2_pstorerif_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32),
"if (!$Pv4) memw($Rs32) = $Rt32",
-PSEUDO, TypeMAPPING> {
+tc_3d905451, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -19582,7 +19660,7 @@ def S2_pstorerifnew_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_2Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4.new) memw($Rx32++#$Ii) = $Rt32",
-ST_tc_st_pi_SLOT01, TypeST>, Enc_10065510, AddrModeRel {
+tc_7675c0e9, TypeST>, Enc_7eaeb6, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
@@ -19602,7 +19680,7 @@ def S2_pstorerinewf_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u30_2Imm:$Ii, IntRegs:$Nt8),
"if (!$Pv4) memw($Rs32+#$Ii) = $Nt8.new",
-V2LDST_tc_st_SLOT0, TypeV2LDST>, Enc_11224149, AddrModeRel {
+tc_9da3628f, TypeV2LDST>, Enc_8dbdfe, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{12-11} = 0b10;
let Inst{31-21} = 0b01000100101;
@@ -19611,8 +19689,8 @@ let isPredicatedFalse = 1;
let addrMode = BaseImmOffset;
let accessSize = WordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storeri";
let InputType = "imm";
let BaseOpcode = "S2_storeri_io";
@@ -19627,7 +19705,7 @@ def S2_pstorerinewf_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_2Imm:$Ii, IntRegs:$Nt8),
"if (!$Pv4) memw($Rx32++#$Ii) = $Nt8.new",
-ST_tc_st_pi_SLOT0, TypeST>, Enc_4813442, AddrModeRel {
+tc_e2480a7f, TypeST>, Enc_65f095, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b0;
let Inst{13-11} = 0b110;
@@ -19637,8 +19715,8 @@ let isPredicatedFalse = 1;
let addrMode = PostInc;
let accessSize = WordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storeri";
let BaseOpcode = "S2_storeri_pi";
let opNewValue = 4;
@@ -19648,7 +19726,7 @@ def S2_pstorerinewf_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Nt8),
"if (!$Pv4) memw($Rs32) = $Nt8.new",
-PSEUDO, TypeMAPPING> {
+tc_9da3628f, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
let opNewValue = 2;
@@ -19657,7 +19735,7 @@ def S2_pstorerinewfnew_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_2Imm:$Ii, IntRegs:$Nt8),
"if (!$Pv4.new) memw($Rx32++#$Ii) = $Nt8.new",
-ST_tc_st_pi_SLOT0, TypeST>, Enc_4813442, AddrModeRel {
+tc_8fab9ac3, TypeST>, Enc_65f095, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b1;
let Inst{13-11} = 0b110;
@@ -19668,8 +19746,8 @@ let addrMode = PostInc;
let accessSize = WordAccess;
let isNVStore = 1;
let isPredicatedNew = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storeri";
let BaseOpcode = "S2_storeri_pi";
let opNewValue = 4;
@@ -19679,7 +19757,7 @@ def S2_pstorerinewt_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u30_2Imm:$Ii, IntRegs:$Nt8),
"if ($Pv4) memw($Rs32+#$Ii) = $Nt8.new",
-V2LDST_tc_st_SLOT0, TypeV2LDST>, Enc_11224149, AddrModeRel {
+tc_9da3628f, TypeV2LDST>, Enc_8dbdfe, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{12-11} = 0b10;
let Inst{31-21} = 0b01000000101;
@@ -19687,8 +19765,8 @@ let isPredicated = 1;
let addrMode = BaseImmOffset;
let accessSize = WordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storeri";
let InputType = "imm";
let BaseOpcode = "S2_storeri_io";
@@ -19703,7 +19781,7 @@ def S2_pstorerinewt_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_2Imm:$Ii, IntRegs:$Nt8),
"if ($Pv4) memw($Rx32++#$Ii) = $Nt8.new",
-ST_tc_st_pi_SLOT0, TypeST>, Enc_4813442, AddrModeRel {
+tc_e2480a7f, TypeST>, Enc_65f095, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b0;
let Inst{13-11} = 0b110;
@@ -19712,8 +19790,8 @@ let isPredicated = 1;
let addrMode = PostInc;
let accessSize = WordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storeri";
let BaseOpcode = "S2_storeri_pi";
let opNewValue = 4;
@@ -19723,7 +19801,7 @@ def S2_pstorerinewt_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Nt8),
"if ($Pv4) memw($Rs32) = $Nt8.new",
-PSEUDO, TypeMAPPING> {
+tc_9da3628f, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
let opNewValue = 2;
@@ -19732,7 +19810,7 @@ def S2_pstorerinewtnew_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_2Imm:$Ii, IntRegs:$Nt8),
"if ($Pv4.new) memw($Rx32++#$Ii) = $Nt8.new",
-ST_tc_st_pi_SLOT0, TypeST>, Enc_4813442, AddrModeRel {
+tc_8fab9ac3, TypeST>, Enc_65f095, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b1;
let Inst{13-11} = 0b110;
@@ -19742,8 +19820,8 @@ let addrMode = PostInc;
let accessSize = WordAccess;
let isNVStore = 1;
let isPredicatedNew = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storeri";
let BaseOpcode = "S2_storeri_pi";
let opNewValue = 4;
@@ -19753,7 +19831,7 @@ def S2_pstorerit_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u30_2Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4) memw($Rs32+#$Ii) = $Rt32",
-V2LDST_tc_st_SLOT01, TypeV2LDST>, Enc_8225953, AddrModeRel {
+tc_3d905451, TypeV2LDST>, Enc_397f23, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{31-21} = 0b01000000100;
let isPredicated = 1;
@@ -19774,7 +19852,7 @@ def S2_pstorerit_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_2Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4) memw($Rx32++#$Ii) = $Rt32",
-ST_tc_st_pi_SLOT01, TypeST>, Enc_10065510, AddrModeRel {
+tc_9b73d261, TypeST>, Enc_7eaeb6, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b1;
@@ -19791,7 +19869,7 @@ def S2_pstorerit_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32),
"if ($Pv4) memw($Rs32) = $Rt32",
-PSEUDO, TypeMAPPING> {
+tc_3d905451, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -19799,7 +19877,7 @@ def S2_pstoreritnew_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_2Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4.new) memw($Rx32++#$Ii) = $Rt32",
-ST_tc_st_pi_SLOT01, TypeST>, Enc_10065510, AddrModeRel {
+tc_7675c0e9, TypeST>, Enc_7eaeb6, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
@@ -19817,7 +19895,7 @@ def S2_setbit_i : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rd32 = setbit($Rs32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_2771456 {
+tc_9c18c9a5, TypeS_2op>, Enc_a05677 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001100110;
@@ -19828,7 +19906,7 @@ def S2_setbit_r : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = setbit($Rs32,$Rt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_14071773 {
+tc_9c18c9a5, TypeS_3op>, Enc_5ab2be {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000110100;
@@ -19839,7 +19917,7 @@ def S2_shuffeb : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = shuffeb($Rss32,$Rtt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_8333157 {
+tc_9c18c9a5, TypeS_3op>, Enc_a56825 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000001000;
@@ -19848,7 +19926,7 @@ def S2_shuffeh : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = shuffeh($Rss32,$Rtt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_8333157 {
+tc_9c18c9a5, TypeS_3op>, Enc_a56825 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000001000;
@@ -19857,7 +19935,7 @@ def S2_shuffob : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = shuffob($Rtt32,$Rss32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_11687333 {
+tc_9c18c9a5, TypeS_3op>, Enc_ea23e4 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000001000;
@@ -19866,7 +19944,7 @@ def S2_shuffoh : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = shuffoh($Rtt32,$Rss32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_11687333 {
+tc_9c18c9a5, TypeS_3op>, Enc_ea23e4 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000001100;
@@ -19875,7 +19953,7 @@ def S2_storerb_io : HInst<
(outs),
(ins IntRegs:$Rs32, s32_0Imm:$Ii, IntRegs:$Rt32),
"memb($Rs32+#$Ii) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_13150110, AddrModeRel {
+tc_53ee6546, TypeST>, Enc_448f7f, AddrModeRel {
let Inst{24-21} = 0b1000;
let Inst{31-27} = 0b10100;
let addrMode = BaseImmOffset;
@@ -19896,7 +19974,7 @@ def S2_storerb_pbr : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, IntRegs:$Rt32),
"memb($Rx32++$Mu2:brev) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_7255914, AddrModeRel {
+tc_20a8e109, TypeST>, Enc_d5c73f, AddrModeRel {
let Inst{7-0} = 0b00000000;
let Inst{31-21} = 0b10101111000;
let accessSize = ByteAccess;
@@ -19909,7 +19987,7 @@ def S2_storerb_pci : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_0Imm:$Ii, ModRegs:$Mu2, IntRegs:$Rt32),
"memb($Rx32++#$Ii:circ($Mu2)) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_3915770 {
+tc_251c87b2, TypeST>, Enc_b15941 {
let Inst{2-0} = 0b000;
let Inst{7-7} = 0b0;
let Inst{31-21} = 0b10101001000;
@@ -19924,7 +20002,7 @@ def S2_storerb_pcr : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, IntRegs:$Rt32),
"memb($Rx32++I:circ($Mu2)) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_7255914 {
+tc_20a8e109, TypeST>, Enc_d5c73f {
let Inst{7-0} = 0b00000010;
let Inst{31-21} = 0b10101001000;
let addrMode = PostInc;
@@ -19938,7 +20016,7 @@ def S2_storerb_pi : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_0Imm:$Ii, IntRegs:$Rt32),
"memb($Rx32++#$Ii) = $Rt32",
-ST_tc_st_pi_SLOT01, TypeST>, Enc_12492533, AddrModeRel {
+tc_20a8e109, TypeST>, Enc_10bc21, AddrModeRel {
let Inst{2-0} = 0b000;
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b0;
@@ -19955,7 +20033,7 @@ def S2_storerb_pr : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, IntRegs:$Rt32),
"memb($Rx32++$Mu2) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_7255914 {
+tc_20a8e109, TypeST>, Enc_d5c73f {
let Inst{7-0} = 0b00000000;
let Inst{31-21} = 0b10101101000;
let addrMode = PostInc;
@@ -19968,7 +20046,7 @@ def S2_storerb_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"memb($Rs32) = $Rt32",
-PSEUDO, TypeMAPPING> {
+tc_53ee6546, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -19976,7 +20054,7 @@ def S2_storerbgp : HInst<
(outs),
(ins u32_0Imm:$Ii, IntRegs:$Rt32),
"memb(gp+#$Ii) = $Rt32",
-V2LDST_tc_st_SLOT01, TypeV2LDST>, Enc_12395768, AddrModeRel {
+tc_c14739d5, TypeV2LDST>, Enc_1b64fb, AddrModeRel {
let Inst{24-21} = 0b0000;
let Inst{31-27} = 0b01001;
let accessSize = ByteAccess;
@@ -19994,15 +20072,15 @@ def S2_storerbnew_io : HInst<
(outs),
(ins IntRegs:$Rs32, s32_0Imm:$Ii, IntRegs:$Nt8),
"memb($Rs32+#$Ii) = $Nt8.new",
-ST_tc_st_SLOT0, TypeST>, Enc_10002182, AddrModeRel {
+tc_6c576d46, TypeST>, Enc_4df4e9, AddrModeRel {
let Inst{12-11} = 0b00;
let Inst{24-21} = 0b1101;
let Inst{31-27} = 0b10100;
let addrMode = BaseImmOffset;
let accessSize = ByteAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerb";
let InputType = "imm";
let BaseOpcode = "S2_storerb_io";
@@ -20018,14 +20096,14 @@ def S2_storerbnew_pbr : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, IntRegs:$Nt8),
"memb($Rx32++$Mu2:brev) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeST>, Enc_10067774, AddrModeRel {
+tc_c8f9a6f6, TypeST>, Enc_8dbe85, AddrModeRel {
let Inst{7-0} = 0b00000000;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b10101111101;
let accessSize = ByteAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let BaseOpcode = "S2_storerb_pbr";
let opNewValue = 3;
let Constraints = "$Rx32 = $Rx32in";
@@ -20034,7 +20112,7 @@ def S2_storerbnew_pci : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_0Imm:$Ii, ModRegs:$Mu2, IntRegs:$Nt8),
"memb($Rx32++#$Ii:circ($Mu2)) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeST>, Enc_5326450 {
+tc_9c68db63, TypeST>, Enc_96ce4f {
let Inst{2-0} = 0b000;
let Inst{7-7} = 0b0;
let Inst{12-11} = 0b00;
@@ -20042,8 +20120,8 @@ let Inst{31-21} = 0b10101001101;
let addrMode = PostInc;
let accessSize = ByteAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let Uses = [CS];
let opNewValue = 4;
let Constraints = "$Rx32 = $Rx32in";
@@ -20052,15 +20130,15 @@ def S2_storerbnew_pcr : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, IntRegs:$Nt8),
"memb($Rx32++I:circ($Mu2)) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeST>, Enc_10067774 {
+tc_c8f9a6f6, TypeST>, Enc_8dbe85 {
let Inst{7-0} = 0b00000010;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b10101001101;
let addrMode = PostInc;
let accessSize = ByteAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let Uses = [CS];
let opNewValue = 3;
let Constraints = "$Rx32 = $Rx32in";
@@ -20069,7 +20147,7 @@ def S2_storerbnew_pi : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_0Imm:$Ii, IntRegs:$Nt8),
"memb($Rx32++#$Ii) = $Nt8.new",
-ST_tc_st_pi_SLOT0, TypeST>, Enc_5900401, AddrModeRel {
+tc_c8f9a6f6, TypeST>, Enc_c7cd90, AddrModeRel {
let Inst{2-0} = 0b000;
let Inst{7-7} = 0b0;
let Inst{13-11} = 0b000;
@@ -20077,8 +20155,8 @@ let Inst{31-21} = 0b10101011101;
let addrMode = PostInc;
let accessSize = ByteAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let BaseOpcode = "S2_storerb_pi";
let isPredicable = 1;
let isNVStorable = 1;
@@ -20089,15 +20167,15 @@ def S2_storerbnew_pr : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, IntRegs:$Nt8),
"memb($Rx32++$Mu2) = $Nt8.new",
-ST_tc_st_SLOT0, TypeST>, Enc_10067774 {
+tc_c8f9a6f6, TypeST>, Enc_8dbe85 {
let Inst{7-0} = 0b00000000;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b10101101101;
let addrMode = PostInc;
let accessSize = ByteAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let opNewValue = 3;
let Constraints = "$Rx32 = $Rx32in";
}
@@ -20105,7 +20183,7 @@ def S2_storerbnew_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Nt8),
"memb($Rs32) = $Nt8.new",
-PSEUDO, TypeMAPPING> {
+tc_6c576d46, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
let opNewValue = 1;
@@ -20114,14 +20192,14 @@ def S2_storerbnewgp : HInst<
(outs),
(ins u32_0Imm:$Ii, IntRegs:$Nt8),
"memb(gp+#$Ii) = $Nt8.new",
-V2LDST_tc_st_SLOT0, TypeV2LDST>, Enc_4050532, AddrModeRel {
+tc_9e86015f, TypeV2LDST>, Enc_ad1831, AddrModeRel {
let Inst{12-11} = 0b00;
let Inst{24-21} = 0b0101;
let Inst{31-27} = 0b01001;
let accessSize = ByteAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let Uses = [GP];
let BaseOpcode = "S2_storerbabs";
let isPredicable = 1;
@@ -20135,7 +20213,7 @@ def S2_storerd_io : HInst<
(outs),
(ins IntRegs:$Rs32, s29_3Imm:$Ii, DoubleRegs:$Rtt32),
"memd($Rs32+#$Ii) = $Rtt32",
-ST_tc_st_SLOT01, TypeST>, Enc_16319737, AddrModeRel {
+tc_53ee6546, TypeST>, Enc_ce6828, AddrModeRel {
let Inst{24-21} = 0b1110;
let Inst{31-27} = 0b10100;
let addrMode = BaseImmOffset;
@@ -20155,7 +20233,7 @@ def S2_storerd_pbr : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, DoubleRegs:$Rtt32),
"memd($Rx32++$Mu2:brev) = $Rtt32",
-ST_tc_st_SLOT01, TypeST>, Enc_15816255 {
+tc_20a8e109, TypeST>, Enc_928ca1 {
let Inst{7-0} = 0b00000000;
let Inst{31-21} = 0b10101111110;
let accessSize = DoubleWordAccess;
@@ -20166,7 +20244,7 @@ def S2_storerd_pci : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_3Imm:$Ii, ModRegs:$Mu2, DoubleRegs:$Rtt32),
"memd($Rx32++#$Ii:circ($Mu2)) = $Rtt32",
-ST_tc_st_SLOT01, TypeST>, Enc_4501395 {
+tc_251c87b2, TypeST>, Enc_395cc4 {
let Inst{2-0} = 0b000;
let Inst{7-7} = 0b0;
let Inst{31-21} = 0b10101001110;
@@ -20180,7 +20258,7 @@ def S2_storerd_pcr : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, DoubleRegs:$Rtt32),
"memd($Rx32++I:circ($Mu2)) = $Rtt32",
-ST_tc_st_SLOT01, TypeST>, Enc_15816255 {
+tc_20a8e109, TypeST>, Enc_928ca1 {
let Inst{7-0} = 0b00000010;
let Inst{31-21} = 0b10101001110;
let addrMode = PostInc;
@@ -20193,7 +20271,7 @@ def S2_storerd_pi : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_3Imm:$Ii, DoubleRegs:$Rtt32),
"memd($Rx32++#$Ii) = $Rtt32",
-ST_tc_st_pi_SLOT01, TypeST>, Enc_11271630, AddrModeRel {
+tc_20a8e109, TypeST>, Enc_85bf58, AddrModeRel {
let Inst{2-0} = 0b000;
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b0;
@@ -20210,7 +20288,7 @@ def S2_storerd_pr : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, DoubleRegs:$Rtt32),
"memd($Rx32++$Mu2) = $Rtt32",
-ST_tc_st_SLOT01, TypeST>, Enc_15816255 {
+tc_20a8e109, TypeST>, Enc_928ca1 {
let Inst{7-0} = 0b00000000;
let Inst{31-21} = 0b10101101110;
let addrMode = PostInc;
@@ -20222,7 +20300,7 @@ def S2_storerd_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, DoubleRegs:$Rtt32),
"memd($Rs32) = $Rtt32",
-PSEUDO, TypeMAPPING> {
+tc_53ee6546, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -20230,7 +20308,7 @@ def S2_storerdgp : HInst<
(outs),
(ins u29_3Imm:$Ii, DoubleRegs:$Rtt32),
"memd(gp+#$Ii) = $Rtt32",
-V2LDST_tc_st_SLOT01, TypeV2LDST>, Enc_11682941, AddrModeRel {
+tc_c14739d5, TypeV2LDST>, Enc_5c124a, AddrModeRel {
let Inst{24-21} = 0b0110;
let Inst{31-27} = 0b01001;
let accessSize = DoubleWordAccess;
@@ -20247,7 +20325,7 @@ def S2_storerf_io : HInst<
(outs),
(ins IntRegs:$Rs32, s31_1Imm:$Ii, IntRegs:$Rt32),
"memh($Rs32+#$Ii) = $Rt32.h",
-ST_tc_st_SLOT01, TypeST>, Enc_7736768, AddrModeRel {
+tc_53ee6546, TypeST>, Enc_e957fb, AddrModeRel {
let Inst{24-21} = 0b1011;
let Inst{31-27} = 0b10100;
let addrMode = BaseImmOffset;
@@ -20267,7 +20345,7 @@ def S2_storerf_pbr : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, IntRegs:$Rt32),
"memh($Rx32++$Mu2:brev) = $Rt32.h",
-ST_tc_st_SLOT01, TypeST>, Enc_7255914 {
+tc_20a8e109, TypeST>, Enc_d5c73f {
let Inst{7-0} = 0b00000000;
let Inst{31-21} = 0b10101111011;
let accessSize = HalfWordAccess;
@@ -20278,7 +20356,7 @@ def S2_storerf_pci : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_1Imm:$Ii, ModRegs:$Mu2, IntRegs:$Rt32),
"memh($Rx32++#$Ii:circ($Mu2)) = $Rt32.h",
-ST_tc_st_SLOT01, TypeST>, Enc_10915758 {
+tc_251c87b2, TypeST>, Enc_935d9b {
let Inst{2-0} = 0b000;
let Inst{7-7} = 0b0;
let Inst{31-21} = 0b10101001011;
@@ -20292,7 +20370,7 @@ def S2_storerf_pcr : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, IntRegs:$Rt32),
"memh($Rx32++I:circ($Mu2)) = $Rt32.h",
-ST_tc_st_SLOT01, TypeST>, Enc_7255914 {
+tc_20a8e109, TypeST>, Enc_d5c73f {
let Inst{7-0} = 0b00000010;
let Inst{31-21} = 0b10101001011;
let addrMode = PostInc;
@@ -20305,7 +20383,7 @@ def S2_storerf_pi : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_1Imm:$Ii, IntRegs:$Rt32),
"memh($Rx32++#$Ii) = $Rt32.h",
-ST_tc_st_pi_SLOT01, TypeST>, Enc_11492529, AddrModeRel {
+tc_20a8e109, TypeST>, Enc_052c7d, AddrModeRel {
let Inst{2-0} = 0b000;
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b0;
@@ -20322,7 +20400,7 @@ def S2_storerf_pr : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, IntRegs:$Rt32),
"memh($Rx32++$Mu2) = $Rt32.h",
-ST_tc_st_SLOT01, TypeST>, Enc_7255914 {
+tc_20a8e109, TypeST>, Enc_d5c73f {
let Inst{7-0} = 0b00000000;
let Inst{31-21} = 0b10101101011;
let addrMode = PostInc;
@@ -20334,7 +20412,7 @@ def S2_storerf_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"memh($Rs32) = $Rt32.h",
-PSEUDO, TypeMAPPING> {
+tc_53ee6546, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -20342,7 +20420,7 @@ def S2_storerfgp : HInst<
(outs),
(ins u31_1Imm:$Ii, IntRegs:$Rt32),
"memh(gp+#$Ii) = $Rt32.h",
-V2LDST_tc_st_SLOT01, TypeV2LDST>, Enc_1186018, AddrModeRel {
+tc_c14739d5, TypeV2LDST>, Enc_fda92c, AddrModeRel {
let Inst{24-21} = 0b0011;
let Inst{31-27} = 0b01001;
let accessSize = HalfWordAccess;
@@ -20359,7 +20437,7 @@ def S2_storerh_io : HInst<
(outs),
(ins IntRegs:$Rs32, s31_1Imm:$Ii, IntRegs:$Rt32),
"memh($Rs32+#$Ii) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_7736768, AddrModeRel {
+tc_53ee6546, TypeST>, Enc_e957fb, AddrModeRel {
let Inst{24-21} = 0b1010;
let Inst{31-27} = 0b10100;
let addrMode = BaseImmOffset;
@@ -20380,7 +20458,7 @@ def S2_storerh_pbr : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, IntRegs:$Rt32),
"memh($Rx32++$Mu2:brev) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_7255914, AddrModeRel {
+tc_20a8e109, TypeST>, Enc_d5c73f, AddrModeRel {
let Inst{7-0} = 0b00000000;
let Inst{31-21} = 0b10101111010;
let accessSize = HalfWordAccess;
@@ -20393,7 +20471,7 @@ def S2_storerh_pci : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_1Imm:$Ii, ModRegs:$Mu2, IntRegs:$Rt32),
"memh($Rx32++#$Ii:circ($Mu2)) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_10915758 {
+tc_251c87b2, TypeST>, Enc_935d9b {
let Inst{2-0} = 0b000;
let Inst{7-7} = 0b0;
let Inst{31-21} = 0b10101001010;
@@ -20408,7 +20486,7 @@ def S2_storerh_pcr : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, IntRegs:$Rt32),
"memh($Rx32++I:circ($Mu2)) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_7255914 {
+tc_20a8e109, TypeST>, Enc_d5c73f {
let Inst{7-0} = 0b00000010;
let Inst{31-21} = 0b10101001010;
let addrMode = PostInc;
@@ -20422,7 +20500,7 @@ def S2_storerh_pi : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_1Imm:$Ii, IntRegs:$Rt32),
"memh($Rx32++#$Ii) = $Rt32",
-ST_tc_st_pi_SLOT01, TypeST>, Enc_11492529, AddrModeRel {
+tc_20a8e109, TypeST>, Enc_052c7d, AddrModeRel {
let Inst{2-0} = 0b000;
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b0;
@@ -20439,7 +20517,7 @@ def S2_storerh_pr : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, IntRegs:$Rt32),
"memh($Rx32++$Mu2) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_7255914 {
+tc_20a8e109, TypeST>, Enc_d5c73f {
let Inst{7-0} = 0b00000000;
let Inst{31-21} = 0b10101101010;
let addrMode = PostInc;
@@ -20452,7 +20530,7 @@ def S2_storerh_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"memh($Rs32) = $Rt32",
-PSEUDO, TypeMAPPING> {
+tc_53ee6546, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -20460,7 +20538,7 @@ def S2_storerhgp : HInst<
(outs),
(ins u31_1Imm:$Ii, IntRegs:$Rt32),
"memh(gp+#$Ii) = $Rt32",
-V2LDST_tc_st_SLOT01, TypeV2LDST>, Enc_1186018, AddrModeRel {
+tc_c14739d5, TypeV2LDST>, Enc_fda92c, AddrModeRel {
let Inst{24-21} = 0b0010;
let Inst{31-27} = 0b01001;
let accessSize = HalfWordAccess;
@@ -20478,15 +20556,15 @@ def S2_storerhnew_io : HInst<
(outs),
(ins IntRegs:$Rs32, s31_1Imm:$Ii, IntRegs:$Nt8),
"memh($Rs32+#$Ii) = $Nt8.new",
-ST_tc_st_SLOT0, TypeST>, Enc_748676, AddrModeRel {
+tc_6c576d46, TypeST>, Enc_0d8870, AddrModeRel {
let Inst{12-11} = 0b01;
let Inst{24-21} = 0b1101;
let Inst{31-27} = 0b10100;
let addrMode = BaseImmOffset;
let accessSize = HalfWordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerh";
let InputType = "imm";
let BaseOpcode = "S2_storerh_io";
@@ -20502,14 +20580,14 @@ def S2_storerhnew_pbr : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, IntRegs:$Nt8),
"memh($Rx32++$Mu2:brev) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeST>, Enc_10067774, AddrModeRel {
+tc_c8f9a6f6, TypeST>, Enc_8dbe85, AddrModeRel {
let Inst{7-0} = 0b00000000;
let Inst{12-11} = 0b01;
let Inst{31-21} = 0b10101111101;
let accessSize = HalfWordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let BaseOpcode = "S2_storerh_pbr";
let opNewValue = 3;
let Constraints = "$Rx32 = $Rx32in";
@@ -20518,7 +20596,7 @@ def S2_storerhnew_pci : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_1Imm:$Ii, ModRegs:$Mu2, IntRegs:$Nt8),
"memh($Rx32++#$Ii:circ($Mu2)) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeST>, Enc_10326434 {
+tc_9c68db63, TypeST>, Enc_91b9fe {
let Inst{2-0} = 0b000;
let Inst{7-7} = 0b0;
let Inst{12-11} = 0b01;
@@ -20526,8 +20604,8 @@ let Inst{31-21} = 0b10101001101;
let addrMode = PostInc;
let accessSize = HalfWordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let Uses = [CS];
let opNewValue = 4;
let Constraints = "$Rx32 = $Rx32in";
@@ -20536,15 +20614,15 @@ def S2_storerhnew_pcr : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, IntRegs:$Nt8),
"memh($Rx32++I:circ($Mu2)) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeST>, Enc_10067774 {
+tc_c8f9a6f6, TypeST>, Enc_8dbe85 {
let Inst{7-0} = 0b00000010;
let Inst{12-11} = 0b01;
let Inst{31-21} = 0b10101001101;
let addrMode = PostInc;
let accessSize = HalfWordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let Uses = [CS];
let opNewValue = 3;
let Constraints = "$Rx32 = $Rx32in";
@@ -20553,7 +20631,7 @@ def S2_storerhnew_pi : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_1Imm:$Ii, IntRegs:$Nt8),
"memh($Rx32++#$Ii) = $Nt8.new",
-ST_tc_st_pi_SLOT0, TypeST>, Enc_6900405, AddrModeRel {
+tc_c8f9a6f6, TypeST>, Enc_e26546, AddrModeRel {
let Inst{2-0} = 0b000;
let Inst{7-7} = 0b0;
let Inst{13-11} = 0b001;
@@ -20561,8 +20639,8 @@ let Inst{31-21} = 0b10101011101;
let addrMode = PostInc;
let accessSize = HalfWordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let BaseOpcode = "S2_storerh_pi";
let isNVStorable = 1;
let isPredicable = 1;
@@ -20573,15 +20651,15 @@ def S2_storerhnew_pr : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, IntRegs:$Nt8),
"memh($Rx32++$Mu2) = $Nt8.new",
-ST_tc_st_SLOT0, TypeST>, Enc_10067774 {
+tc_c8f9a6f6, TypeST>, Enc_8dbe85 {
let Inst{7-0} = 0b00000000;
let Inst{12-11} = 0b01;
let Inst{31-21} = 0b10101101101;
let addrMode = PostInc;
let accessSize = HalfWordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let opNewValue = 3;
let Constraints = "$Rx32 = $Rx32in";
}
@@ -20589,7 +20667,7 @@ def S2_storerhnew_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Nt8),
"memh($Rs32) = $Nt8.new",
-PSEUDO, TypeMAPPING> {
+tc_6c576d46, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
let opNewValue = 1;
@@ -20598,14 +20676,14 @@ def S2_storerhnewgp : HInst<
(outs),
(ins u31_1Imm:$Ii, IntRegs:$Nt8),
"memh(gp+#$Ii) = $Nt8.new",
-V2LDST_tc_st_SLOT0, TypeV2LDST>, Enc_13618890, AddrModeRel {
+tc_9e86015f, TypeV2LDST>, Enc_bc03e5, AddrModeRel {
let Inst{12-11} = 0b01;
let Inst{24-21} = 0b0101;
let Inst{31-27} = 0b01001;
let accessSize = HalfWordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let Uses = [GP];
let BaseOpcode = "S2_storerhabs";
let isPredicable = 1;
@@ -20619,7 +20697,7 @@ def S2_storeri_io : HInst<
(outs),
(ins IntRegs:$Rs32, s30_2Imm:$Ii, IntRegs:$Rt32),
"memw($Rs32+#$Ii) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_6673186, AddrModeRel {
+tc_53ee6546, TypeST>, Enc_143445, AddrModeRel {
let Inst{24-21} = 0b1100;
let Inst{31-27} = 0b10100;
let addrMode = BaseImmOffset;
@@ -20640,7 +20718,7 @@ def S2_storeri_pbr : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, IntRegs:$Rt32),
"memw($Rx32++$Mu2:brev) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_7255914, AddrModeRel {
+tc_20a8e109, TypeST>, Enc_d5c73f, AddrModeRel {
let Inst{7-0} = 0b00000000;
let Inst{31-21} = 0b10101111100;
let accessSize = WordAccess;
@@ -20653,7 +20731,7 @@ def S2_storeri_pci : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_2Imm:$Ii, ModRegs:$Mu2, IntRegs:$Rt32),
"memw($Rx32++#$Ii:circ($Mu2)) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_9915754 {
+tc_251c87b2, TypeST>, Enc_79b8c8 {
let Inst{2-0} = 0b000;
let Inst{7-7} = 0b0;
let Inst{31-21} = 0b10101001100;
@@ -20668,7 +20746,7 @@ def S2_storeri_pcr : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, IntRegs:$Rt32),
"memw($Rx32++I:circ($Mu2)) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_7255914 {
+tc_20a8e109, TypeST>, Enc_d5c73f {
let Inst{7-0} = 0b00000010;
let Inst{31-21} = 0b10101001100;
let addrMode = PostInc;
@@ -20682,7 +20760,7 @@ def S2_storeri_pi : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_2Imm:$Ii, IntRegs:$Rt32),
"memw($Rx32++#$Ii) = $Rt32",
-ST_tc_st_pi_SLOT01, TypeST>, Enc_10492541, AddrModeRel {
+tc_20a8e109, TypeST>, Enc_db40cd, AddrModeRel {
let Inst{2-0} = 0b000;
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b0;
@@ -20699,7 +20777,7 @@ def S2_storeri_pr : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, IntRegs:$Rt32),
"memw($Rx32++$Mu2) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_7255914 {
+tc_20a8e109, TypeST>, Enc_d5c73f {
let Inst{7-0} = 0b00000000;
let Inst{31-21} = 0b10101101100;
let addrMode = PostInc;
@@ -20712,7 +20790,7 @@ def S2_storeri_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"memw($Rs32) = $Rt32",
-PSEUDO, TypeMAPPING> {
+tc_53ee6546, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -20720,7 +20798,7 @@ def S2_storerigp : HInst<
(outs),
(ins u30_2Imm:$Ii, IntRegs:$Rt32),
"memw(gp+#$Ii) = $Rt32",
-V2LDST_tc_st_SLOT01, TypeV2LDST>, Enc_15999208, AddrModeRel {
+tc_c14739d5, TypeV2LDST>, Enc_541f26, AddrModeRel {
let Inst{24-21} = 0b0100;
let Inst{31-27} = 0b01001;
let accessSize = WordAccess;
@@ -20738,15 +20816,15 @@ def S2_storerinew_io : HInst<
(outs),
(ins IntRegs:$Rs32, s30_2Imm:$Ii, IntRegs:$Nt8),
"memw($Rs32+#$Ii) = $Nt8.new",
-ST_tc_st_SLOT0, TypeST>, Enc_8409782, AddrModeRel {
+tc_6c576d46, TypeST>, Enc_690862, AddrModeRel {
let Inst{12-11} = 0b10;
let Inst{24-21} = 0b1101;
let Inst{31-27} = 0b10100;
let addrMode = BaseImmOffset;
let accessSize = WordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storeri";
let InputType = "imm";
let BaseOpcode = "S2_storeri_io";
@@ -20762,14 +20840,14 @@ def S2_storerinew_pbr : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, IntRegs:$Nt8),
"memw($Rx32++$Mu2:brev) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeST>, Enc_10067774, AddrModeRel {
+tc_c8f9a6f6, TypeST>, Enc_8dbe85, AddrModeRel {
let Inst{7-0} = 0b00000000;
let Inst{12-11} = 0b10;
let Inst{31-21} = 0b10101111101;
let accessSize = WordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let BaseOpcode = "S2_storeri_pbr";
let opNewValue = 3;
let Constraints = "$Rx32 = $Rx32in";
@@ -20778,7 +20856,7 @@ def S2_storerinew_pci : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_2Imm:$Ii, ModRegs:$Mu2, IntRegs:$Nt8),
"memw($Rx32++#$Ii:circ($Mu2)) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeST>, Enc_11326438 {
+tc_9c68db63, TypeST>, Enc_3f97c8 {
let Inst{2-0} = 0b000;
let Inst{7-7} = 0b0;
let Inst{12-11} = 0b10;
@@ -20786,8 +20864,8 @@ let Inst{31-21} = 0b10101001101;
let addrMode = PostInc;
let accessSize = WordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let Uses = [CS];
let opNewValue = 4;
let Constraints = "$Rx32 = $Rx32in";
@@ -20796,15 +20874,15 @@ def S2_storerinew_pcr : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, IntRegs:$Nt8),
"memw($Rx32++I:circ($Mu2)) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeST>, Enc_10067774 {
+tc_c8f9a6f6, TypeST>, Enc_8dbe85 {
let Inst{7-0} = 0b00000010;
let Inst{12-11} = 0b10;
let Inst{31-21} = 0b10101001101;
let addrMode = PostInc;
let accessSize = WordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let Uses = [CS];
let opNewValue = 3;
let Constraints = "$Rx32 = $Rx32in";
@@ -20813,7 +20891,7 @@ def S2_storerinew_pi : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_2Imm:$Ii, IntRegs:$Nt8),
"memw($Rx32++#$Ii) = $Nt8.new",
-ST_tc_st_pi_SLOT0, TypeST>, Enc_7900405, AddrModeRel {
+tc_c8f9a6f6, TypeST>, Enc_223005, AddrModeRel {
let Inst{2-0} = 0b000;
let Inst{7-7} = 0b0;
let Inst{13-11} = 0b010;
@@ -20821,8 +20899,8 @@ let Inst{31-21} = 0b10101011101;
let addrMode = PostInc;
let accessSize = WordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let BaseOpcode = "S2_storeri_pi";
let isPredicable = 1;
let opNewValue = 3;
@@ -20832,15 +20910,15 @@ def S2_storerinew_pr : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, IntRegs:$Nt8),
"memw($Rx32++$Mu2) = $Nt8.new",
-ST_tc_st_SLOT0, TypeST>, Enc_10067774 {
+tc_c8f9a6f6, TypeST>, Enc_8dbe85 {
let Inst{7-0} = 0b00000000;
let Inst{12-11} = 0b10;
let Inst{31-21} = 0b10101101101;
let addrMode = PostInc;
let accessSize = WordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let opNewValue = 3;
let Constraints = "$Rx32 = $Rx32in";
}
@@ -20848,7 +20926,7 @@ def S2_storerinew_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Nt8),
"memw($Rs32) = $Nt8.new",
-PSEUDO, TypeMAPPING> {
+tc_6c576d46, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
let opNewValue = 1;
@@ -20857,14 +20935,14 @@ def S2_storerinewgp : HInst<
(outs),
(ins u30_2Imm:$Ii, IntRegs:$Nt8),
"memw(gp+#$Ii) = $Nt8.new",
-V2LDST_tc_st_SLOT0, TypeV2LDST>, Enc_12297800, AddrModeRel {
+tc_9e86015f, TypeV2LDST>, Enc_78cbf0, AddrModeRel {
let Inst{12-11} = 0b10;
let Inst{24-21} = 0b0101;
let Inst{31-27} = 0b01001;
let accessSize = WordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let Uses = [GP];
let BaseOpcode = "S2_storeriabs";
let isPredicable = 1;
@@ -20878,20 +20956,20 @@ def S2_storew_locked : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"memw_locked($Rs32,$Pd4) = $Rt32",
-ST_tc_ld_SLOT0, TypeST>, Enc_10157519 {
+tc_7d01cbdc, TypeST>, Enc_c2b48e {
let Inst{7-2} = 0b000000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10100000101;
let accessSize = WordAccess;
+let isPredicateLate = 1;
let isSoloAX = 1;
let mayStore = 1;
-let isPredicateLate = 1;
}
def S2_svsathb : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = vsathb($Rs32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_4075554 {
+tc_b86c7e8b, TypeS_2op>, Enc_5e2823 {
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b10001100100;
let hasNewValue = 1;
@@ -20902,7 +20980,7 @@ def S2_svsathub : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = vsathub($Rs32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_4075554 {
+tc_b86c7e8b, TypeS_2op>, Enc_5e2823 {
let Inst{13-5} = 0b000000010;
let Inst{31-21} = 0b10001100100;
let hasNewValue = 1;
@@ -20913,7 +20991,7 @@ def S2_tableidxb : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u4_0Imm:$Ii, s6_0Imm:$II),
"$Rx32 = tableidxb($Rs32,#$Ii,#$II):raw",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_8838398 {
+tc_d95f4e98, TypeS_2op>, Enc_cd82bc {
let Inst{31-22} = 0b1000011100;
let hasNewValue = 1;
let opNewValue = 0;
@@ -20924,7 +21002,7 @@ def S2_tableidxb_goodsyntax : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u4_0Imm:$Ii, u5_0Imm:$II),
"$Rx32 = tableidxb($Rs32,#$Ii,#$II)",
-S_2op_tc_1_SLOT23, TypeS_2op> {
+tc_d95f4e98, TypeS_2op> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -20935,7 +21013,7 @@ def S2_tableidxd : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u4_0Imm:$Ii, s6_0Imm:$II),
"$Rx32 = tableidxd($Rs32,#$Ii,#$II):raw",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_8838398 {
+tc_d95f4e98, TypeS_2op>, Enc_cd82bc {
let Inst{31-22} = 0b1000011111;
let hasNewValue = 1;
let opNewValue = 0;
@@ -20946,7 +21024,7 @@ def S2_tableidxd_goodsyntax : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u4_0Imm:$Ii, u5_0Imm:$II),
"$Rx32 = tableidxd($Rs32,#$Ii,#$II)",
-S_2op_tc_1_SLOT23, TypeS_2op> {
+tc_d95f4e98, TypeS_2op> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -20956,7 +21034,7 @@ def S2_tableidxh : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u4_0Imm:$Ii, s6_0Imm:$II),
"$Rx32 = tableidxh($Rs32,#$Ii,#$II):raw",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_8838398 {
+tc_d95f4e98, TypeS_2op>, Enc_cd82bc {
let Inst{31-22} = 0b1000011101;
let hasNewValue = 1;
let opNewValue = 0;
@@ -20967,7 +21045,7 @@ def S2_tableidxh_goodsyntax : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u4_0Imm:$Ii, u5_0Imm:$II),
"$Rx32 = tableidxh($Rs32,#$Ii,#$II)",
-S_2op_tc_1_SLOT23, TypeS_2op> {
+tc_d95f4e98, TypeS_2op> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -20977,7 +21055,7 @@ def S2_tableidxw : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u4_0Imm:$Ii, s6_0Imm:$II),
"$Rx32 = tableidxw($Rs32,#$Ii,#$II):raw",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_8838398 {
+tc_d95f4e98, TypeS_2op>, Enc_cd82bc {
let Inst{31-22} = 0b1000011110;
let hasNewValue = 1;
let opNewValue = 0;
@@ -20988,7 +21066,7 @@ def S2_tableidxw_goodsyntax : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u4_0Imm:$Ii, u5_0Imm:$II),
"$Rx32 = tableidxw($Rs32,#$Ii,#$II)",
-S_2op_tc_1_SLOT23, TypeS_2op> {
+tc_d95f4e98, TypeS_2op> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -20998,7 +21076,7 @@ def S2_togglebit_i : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rd32 = togglebit($Rs32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_2771456 {
+tc_9c18c9a5, TypeS_2op>, Enc_a05677 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001100110;
@@ -21009,7 +21087,7 @@ def S2_togglebit_r : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = togglebit($Rs32,$Rt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_14071773 {
+tc_9c18c9a5, TypeS_3op>, Enc_5ab2be {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000110100;
@@ -21020,7 +21098,7 @@ def S2_tstbit_i : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, u5_0Imm:$Ii),
"$Pd4 = tstbit($Rs32,#$Ii)",
-S_2op_tc_2early_SLOT23, TypeS_2op>, Enc_2103742 {
+tc_5fa2857c, TypeS_2op>, Enc_83ee64 {
let Inst{7-2} = 0b000000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10000101000;
@@ -21029,7 +21107,7 @@ def S2_tstbit_r : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Pd4 = tstbit($Rs32,$Rt32)",
-S_3op_tc_2early_SLOT23, TypeS_3op>, Enc_10157519 {
+tc_c58f771a, TypeS_3op>, Enc_c2b48e {
let Inst{7-2} = 0b000000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000111000;
@@ -21038,7 +21116,7 @@ def S2_valignib : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32, u3_0Imm:$Ii),
"$Rdd32 = valignb($Rtt32,$Rss32,#$Ii)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_11971407 {
+tc_d1b5a4b6, TypeS_3op>, Enc_729ff7 {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000000000;
}
@@ -21046,7 +21124,7 @@ def S2_valignrb : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32, PredRegs:$Pu4),
"$Rdd32 = valignb($Rtt32,$Rss32,$Pu4)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_11552785 {
+tc_d1b5a4b6, TypeS_3op>, Enc_8c6530 {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000010000;
@@ -21055,7 +21133,7 @@ def S2_vcnegh : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rdd32 = vcnegh($Rss32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_8940892 {
+tc_47ab9233, TypeS_3op>, Enc_927852 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000011110;
@@ -21066,7 +21144,7 @@ def S2_vcrotate : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rdd32 = vcrotate($Rss32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_8940892 {
+tc_63cd9d2d, TypeS_3op>, Enc_927852 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000011110;
@@ -21077,7 +21155,7 @@ def S2_vrcnegh : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rxx32 += vrcnegh($Rss32,$Rt32)",
-S_3op_tc_3x_SLOT23, TypeS_3op>, Enc_7912540 {
+tc_8cb685d9, TypeS_3op>, Enc_1aa186 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b11001011001;
@@ -21088,28 +21166,30 @@ def S2_vrndpackwh : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32),
"$Rd32 = vrndwh($Rss32)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_3742184 {
+tc_88fa2da6, TypeS_2op>, Enc_90cd8b {
let Inst{13-5} = 0b000000100;
let Inst{31-21} = 0b10001000100;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def S2_vrndpackwhs : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32),
"$Rd32 = vrndwh($Rss32):sat",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_3742184 {
+tc_94e6ffd9, TypeS_2op>, Enc_90cd8b {
let Inst{13-5} = 0b000000110;
let Inst{31-21} = 0b10001000100;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def S2_vsathb : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32),
"$Rd32 = vsathb($Rss32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_3742184 {
+tc_b86c7e8b, TypeS_2op>, Enc_90cd8b {
let Inst{13-5} = 0b000000110;
let Inst{31-21} = 0b10001000000;
let hasNewValue = 1;
@@ -21120,7 +21200,7 @@ def S2_vsathb_nopack : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32),
"$Rdd32 = vsathb($Rss32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_13133231 {
+tc_b86c7e8b, TypeS_2op>, Enc_b9c5fb {
let Inst{13-5} = 0b000000111;
let Inst{31-21} = 0b10000000000;
let Defs = [USR_OVF];
@@ -21129,7 +21209,7 @@ def S2_vsathub : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32),
"$Rd32 = vsathub($Rss32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_3742184 {
+tc_b86c7e8b, TypeS_2op>, Enc_90cd8b {
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b10001000000;
let hasNewValue = 1;
@@ -21140,7 +21220,7 @@ def S2_vsathub_nopack : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32),
"$Rdd32 = vsathub($Rss32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_13133231 {
+tc_b86c7e8b, TypeS_2op>, Enc_b9c5fb {
let Inst{13-5} = 0b000000100;
let Inst{31-21} = 0b10000000000;
let Defs = [USR_OVF];
@@ -21149,7 +21229,7 @@ def S2_vsatwh : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32),
"$Rd32 = vsatwh($Rss32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_3742184 {
+tc_b86c7e8b, TypeS_2op>, Enc_90cd8b {
let Inst{13-5} = 0b000000010;
let Inst{31-21} = 0b10001000000;
let hasNewValue = 1;
@@ -21160,7 +21240,7 @@ def S2_vsatwh_nopack : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32),
"$Rdd32 = vsatwh($Rss32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_13133231 {
+tc_b86c7e8b, TypeS_2op>, Enc_b9c5fb {
let Inst{13-5} = 0b000000110;
let Inst{31-21} = 0b10000000000;
let Defs = [USR_OVF];
@@ -21169,7 +21249,7 @@ def S2_vsatwuh : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32),
"$Rd32 = vsatwuh($Rss32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_3742184 {
+tc_b86c7e8b, TypeS_2op>, Enc_90cd8b {
let Inst{13-5} = 0b000000100;
let Inst{31-21} = 0b10001000000;
let hasNewValue = 1;
@@ -21180,7 +21260,7 @@ def S2_vsatwuh_nopack : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32),
"$Rdd32 = vsatwuh($Rss32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_13133231 {
+tc_b86c7e8b, TypeS_2op>, Enc_b9c5fb {
let Inst{13-5} = 0b000000101;
let Inst{31-21} = 0b10000000000;
let Defs = [USR_OVF];
@@ -21189,7 +21269,7 @@ def S2_vsplatrb : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = vsplatb($Rs32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_4075554 {
+tc_b86c7e8b, TypeS_2op>, Enc_5e2823 {
let Inst{13-5} = 0b000000111;
let Inst{31-21} = 0b10001100010;
let hasNewValue = 1;
@@ -21201,7 +21281,7 @@ def S2_vsplatrh : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32),
"$Rdd32 = vsplath($Rs32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_4030179 {
+tc_b86c7e8b, TypeS_2op>, Enc_3a3d62 {
let Inst{13-5} = 0b000000010;
let Inst{31-21} = 0b10000100010;
let isReMaterializable = 1;
@@ -21211,7 +21291,7 @@ def S2_vspliceib : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32, u3_0Imm:$Ii),
"$Rdd32 = vspliceb($Rss32,$Rtt32,#$Ii)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_16730127 {
+tc_d1b5a4b6, TypeS_3op>, Enc_d50cd3 {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000000100;
}
@@ -21219,7 +21299,7 @@ def S2_vsplicerb : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32, PredRegs:$Pu4),
"$Rdd32 = vspliceb($Rss32,$Rtt32,$Pu4)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_5178985 {
+tc_d1b5a4b6, TypeS_3op>, Enc_dbd70c {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000010100;
@@ -21228,7 +21308,7 @@ def S2_vsxtbh : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32),
"$Rdd32 = vsxtbh($Rs32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_4030179 {
+tc_b86c7e8b, TypeS_2op>, Enc_3a3d62 {
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b10000100000;
let isReMaterializable = 1;
@@ -21238,7 +21318,7 @@ def S2_vsxthw : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32),
"$Rdd32 = vsxthw($Rs32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_4030179 {
+tc_b86c7e8b, TypeS_2op>, Enc_3a3d62 {
let Inst{13-5} = 0b000000100;
let Inst{31-21} = 0b10000100000;
let isReMaterializable = 1;
@@ -21248,7 +21328,7 @@ def S2_vtrunehb : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32),
"$Rd32 = vtrunehb($Rss32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_3742184 {
+tc_b86c7e8b, TypeS_2op>, Enc_90cd8b {
let Inst{13-5} = 0b000000010;
let Inst{31-21} = 0b10001000100;
let hasNewValue = 1;
@@ -21258,7 +21338,7 @@ def S2_vtrunewh : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vtrunewh($Rss32,$Rtt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_8333157 {
+tc_9c18c9a5, TypeS_3op>, Enc_a56825 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000001100;
@@ -21267,7 +21347,7 @@ def S2_vtrunohb : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32),
"$Rd32 = vtrunohb($Rss32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_3742184 {
+tc_b86c7e8b, TypeS_2op>, Enc_90cd8b {
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b10001000100;
let hasNewValue = 1;
@@ -21277,7 +21357,7 @@ def S2_vtrunowh : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vtrunowh($Rss32,$Rtt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_8333157 {
+tc_9c18c9a5, TypeS_3op>, Enc_a56825 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000001100;
@@ -21286,7 +21366,7 @@ def S2_vzxtbh : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32),
"$Rdd32 = vzxtbh($Rs32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_4030179 {
+tc_b86c7e8b, TypeS_2op>, Enc_3a3d62 {
let Inst{13-5} = 0b000000010;
let Inst{31-21} = 0b10000100000;
let isReMaterializable = 1;
@@ -21296,7 +21376,7 @@ def S2_vzxthw : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32),
"$Rdd32 = vzxthw($Rs32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_4030179 {
+tc_b86c7e8b, TypeS_2op>, Enc_3a3d62 {
let Inst{13-5} = 0b000000110;
let Inst{31-21} = 0b10000100000;
let isReMaterializable = 1;
@@ -21306,7 +21386,7 @@ def S4_addaddi : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Ru32, s32_0Imm:$Ii),
"$Rd32 = add($Rs32,add($Ru32,#$Ii))",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_6495334 {
+tc_090485bb, TypeALU64>, Enc_8b8d61 {
let Inst{31-23} = 0b110110110;
let hasNewValue = 1;
let opNewValue = 0;
@@ -21321,7 +21401,7 @@ def S4_addi_asl_ri : HInst<
(outs IntRegs:$Rx32),
(ins u32_0Imm:$Ii, IntRegs:$Rx32in, u5_0Imm:$II),
"$Rx32 = add(#$Ii,asl($Rx32in,#$II))",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_117962 {
+tc_c0cd91a8, TypeALU64>, Enc_c31910 {
let Inst{2-0} = 0b100;
let Inst{4-4} = 0b0;
let Inst{31-24} = 0b11011110;
@@ -21339,7 +21419,7 @@ def S4_addi_lsr_ri : HInst<
(outs IntRegs:$Rx32),
(ins u32_0Imm:$Ii, IntRegs:$Rx32in, u5_0Imm:$II),
"$Rx32 = add(#$Ii,lsr($Rx32in,#$II))",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_117962 {
+tc_c0cd91a8, TypeALU64>, Enc_c31910 {
let Inst{2-0} = 0b100;
let Inst{4-4} = 0b1;
let Inst{31-24} = 0b11011110;
@@ -21357,7 +21437,7 @@ def S4_andi_asl_ri : HInst<
(outs IntRegs:$Rx32),
(ins u32_0Imm:$Ii, IntRegs:$Rx32in, u5_0Imm:$II),
"$Rx32 = and(#$Ii,asl($Rx32in,#$II))",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_117962 {
+tc_3c10f809, TypeALU64>, Enc_c31910 {
let Inst{2-0} = 0b000;
let Inst{4-4} = 0b0;
let Inst{31-24} = 0b11011110;
@@ -21375,7 +21455,7 @@ def S4_andi_lsr_ri : HInst<
(outs IntRegs:$Rx32),
(ins u32_0Imm:$Ii, IntRegs:$Rx32in, u5_0Imm:$II),
"$Rx32 = and(#$Ii,lsr($Rx32in,#$II))",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_117962 {
+tc_3c10f809, TypeALU64>, Enc_c31910 {
let Inst{2-0} = 0b000;
let Inst{4-4} = 0b1;
let Inst{31-24} = 0b11011110;
@@ -21393,7 +21473,7 @@ def S4_clbaddi : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, s6_0Imm:$Ii),
"$Rd32 = add(clb($Rs32),#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_5523416 {
+tc_87601822, TypeS_2op>, Enc_9fae8a {
let Inst{7-5} = 0b000;
let Inst{31-21} = 0b10001100001;
let hasNewValue = 1;
@@ -21404,7 +21484,7 @@ def S4_clbpaddi : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32, s6_0Imm:$Ii),
"$Rd32 = add(clb($Rss32),#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_10188026 {
+tc_87601822, TypeS_2op>, Enc_a1640c {
let Inst{7-5} = 0b010;
let Inst{31-21} = 0b10001000011;
let hasNewValue = 1;
@@ -21415,17 +21495,18 @@ def S4_clbpnorm : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32),
"$Rd32 = normamt($Rss32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_3742184 {
+tc_ab1b5e74, TypeS_2op>, Enc_90cd8b {
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b10001000011;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def S4_extract : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, u5_0Imm:$Ii, u5_0Imm:$II),
"$Rd32 = extract($Rs32,#$Ii,#$II)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_11930928 {
+tc_c0cd91a8, TypeS_2op>, Enc_b388cf {
let Inst{13-13} = 0b0;
let Inst{31-23} = 0b100011011;
let hasNewValue = 1;
@@ -21436,7 +21517,7 @@ def S4_extract_rp : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, DoubleRegs:$Rtt32),
"$Rd32 = extract($Rs32,$Rtt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_15472748 {
+tc_87601822, TypeS_3op>, Enc_e07374 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001001000;
@@ -21448,7 +21529,7 @@ def S4_extractp : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, u6_0Imm:$Ii, u6_0Imm:$II),
"$Rdd32 = extract($Rss32,#$Ii,#$II)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_9894557 {
+tc_c0cd91a8, TypeS_2op>, Enc_b84c4c {
let Inst{31-24} = 0b10001010;
let prefersSlot3 = 1;
}
@@ -21456,7 +21537,7 @@ def S4_extractp_rp : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = extract($Rss32,$Rtt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_8333157 {
+tc_87601822, TypeS_3op>, Enc_a56825 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000001110;
@@ -21466,7 +21547,7 @@ def S4_lsli : HInst<
(outs IntRegs:$Rd32),
(ins s6_0Imm:$Ii, IntRegs:$Rt32),
"$Rd32 = lsl(#$Ii,$Rt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_518319 {
+tc_9c18c9a5, TypeS_3op>, Enc_fef969 {
let Inst{7-6} = 0b11;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000110100;
@@ -21477,7 +21558,7 @@ def S4_ntstbit_i : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, u5_0Imm:$Ii),
"$Pd4 = !tstbit($Rs32,#$Ii)",
-S_2op_tc_2early_SLOT23, TypeS_2op>, Enc_2103742 {
+tc_5fa2857c, TypeS_2op>, Enc_83ee64 {
let Inst{7-2} = 0b000000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10000101001;
@@ -21486,7 +21567,7 @@ def S4_ntstbit_r : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Pd4 = !tstbit($Rs32,$Rt32)",
-S_3op_tc_2early_SLOT23, TypeS_3op>, Enc_10157519 {
+tc_c58f771a, TypeS_3op>, Enc_c2b48e {
let Inst{7-2} = 0b000000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000111001;
@@ -21495,7 +21576,7 @@ def S4_or_andi : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, s32_0Imm:$Ii),
"$Rx32 |= and($Rs32,#$Ii)",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_6356866 {
+tc_3c10f809, TypeALU64>, Enc_b0e9d8 {
let Inst{31-22} = 0b1101101000;
let hasNewValue = 1;
let opNewValue = 0;
@@ -21512,7 +21593,7 @@ def S4_or_andix : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Ru32, IntRegs:$Rx32in, s32_0Imm:$Ii),
"$Rx32 = or($Ru32,and($Rx32in,#$Ii))",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_7504828 {
+tc_3c10f809, TypeALU64>, Enc_b4e6cf {
let Inst{31-22} = 0b1101101001;
let hasNewValue = 1;
let opNewValue = 0;
@@ -21528,7 +21609,7 @@ def S4_or_ori : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, s32_0Imm:$Ii),
"$Rx32 |= or($Rs32,#$Ii)",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_6356866 {
+tc_3c10f809, TypeALU64>, Enc_b0e9d8 {
let Inst{31-22} = 0b1101101010;
let hasNewValue = 1;
let opNewValue = 0;
@@ -21545,7 +21626,7 @@ def S4_ori_asl_ri : HInst<
(outs IntRegs:$Rx32),
(ins u32_0Imm:$Ii, IntRegs:$Rx32in, u5_0Imm:$II),
"$Rx32 = or(#$Ii,asl($Rx32in,#$II))",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_117962 {
+tc_3c10f809, TypeALU64>, Enc_c31910 {
let Inst{2-0} = 0b010;
let Inst{4-4} = 0b0;
let Inst{31-24} = 0b11011110;
@@ -21563,7 +21644,7 @@ def S4_ori_lsr_ri : HInst<
(outs IntRegs:$Rx32),
(ins u32_0Imm:$Ii, IntRegs:$Rx32in, u5_0Imm:$II),
"$Rx32 = or(#$Ii,lsr($Rx32in,#$II))",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_117962 {
+tc_3c10f809, TypeALU64>, Enc_c31910 {
let Inst{2-0} = 0b010;
let Inst{4-4} = 0b1;
let Inst{31-24} = 0b11011110;
@@ -21581,7 +21662,7 @@ def S4_parity : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = parity($Rs32,$Rt32)",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_14071773 {
+tc_87601822, TypeALU64>, Enc_5ab2be {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101111;
@@ -21593,7 +21674,7 @@ def S4_pstorerbf_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4) memb(#$Ii) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_16657398, AddrModeRel {
+tc_c85212ca, TypeST>, Enc_1cf4ca, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b0;
@@ -21618,7 +21699,7 @@ def S4_pstorerbf_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4) memb($Rs32+$Ru32<<#$Ii) = $Rt32",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_11940513, AddrModeRel {
+tc_7bc567a7, TypeST>, Enc_6339d5, AddrModeRel {
let Inst{31-21} = 0b00110101000;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -21634,7 +21715,7 @@ def S4_pstorerbfnew_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4.new) memb(#$Ii) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_16657398, AddrModeRel {
+tc_336e698c, TypeST>, Enc_1cf4ca, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
@@ -21643,8 +21724,8 @@ let isPredicated = 1;
let isPredicatedFalse = 1;
let addrMode = Absolute;
let accessSize = ByteAccess;
-let isExtended = 1;
let isPredicatedNew = 1;
+let isExtended = 1;
let mayStore = 1;
let CextOpcode = "S2_storerb";
let BaseOpcode = "S2_storerbabs";
@@ -21660,7 +21741,7 @@ def S4_pstorerbfnew_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u32_0Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4.new) memb($Rs32+#$Ii) = $Rt32",
-V2LDST_tc_st_SLOT01, TypeV2LDST>, Enc_14044877, AddrModeRel {
+tc_20a8e109, TypeV2LDST>, Enc_da8d43, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{31-21} = 0b01000110000;
let isPredicated = 1;
@@ -21683,7 +21764,7 @@ def S4_pstorerbfnew_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4.new) memb($Rs32+$Ru32<<#$Ii) = $Rt32",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_11940513, AddrModeRel {
+tc_7639d4b0, TypeST>, Enc_6339d5, AddrModeRel {
let Inst{31-21} = 0b00110111000;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -21700,7 +21781,7 @@ def S4_pstorerbfnew_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32),
"if (!$Pv4.new) memb($Rs32) = $Rt32",
-PSEUDO, TypeMAPPING> {
+tc_20a8e109, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -21708,7 +21789,7 @@ def S4_pstorerbnewf_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Nt8),
"if (!$Pv4) memb(#$Ii) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeST>, Enc_1774350, AddrModeRel {
+tc_2c8fe5ae, TypeST>, Enc_44215c, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b1;
let Inst{13-11} = 0b000;
@@ -21718,9 +21799,9 @@ let isPredicatedFalse = 1;
let addrMode = Absolute;
let accessSize = ByteAccess;
let isNVStore = 1;
+let isNewValue = 1;
let isExtended = 1;
let mayStore = 1;
-let isNewValue = 1;
let CextOpcode = "S2_storerb";
let BaseOpcode = "S2_storerbabs";
let DecoderNamespace = "MustExtend";
@@ -21735,7 +21816,7 @@ def S4_pstorerbnewf_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Nt8),
"if (!$Pv4) memb($Rs32+$Ru32<<#$Ii) = $Nt8.new",
-V4LDST_tc_st_SLOT0, TypeST>, Enc_11000933, AddrModeRel {
+tc_77781686, TypeST>, Enc_47ee5e, AddrModeRel {
let Inst{4-3} = 0b00;
let Inst{31-21} = 0b00110101101;
let isPredicated = 1;
@@ -21743,8 +21824,8 @@ let isPredicatedFalse = 1;
let addrMode = BaseRegOffset;
let accessSize = ByteAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerb";
let InputType = "reg";
let BaseOpcode = "S4_storerb_rr";
@@ -21754,7 +21835,7 @@ def S4_pstorerbnewfnew_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Nt8),
"if (!$Pv4.new) memb(#$Ii) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeST>, Enc_1774350, AddrModeRel {
+tc_7986ba30, TypeST>, Enc_44215c, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b1;
let Inst{13-11} = 0b100;
@@ -21764,10 +21845,10 @@ let isPredicatedFalse = 1;
let addrMode = Absolute;
let accessSize = ByteAccess;
let isNVStore = 1;
-let isExtended = 1;
let isPredicatedNew = 1;
-let mayStore = 1;
let isNewValue = 1;
+let isExtended = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerb";
let BaseOpcode = "S2_storerbabs";
let DecoderNamespace = "MustExtend";
@@ -21782,7 +21863,7 @@ def S4_pstorerbnewfnew_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u32_0Imm:$Ii, IntRegs:$Nt8),
"if (!$Pv4.new) memb($Rs32+#$Ii) = $Nt8.new",
-V2LDST_tc_st_SLOT0, TypeV2LDST>, Enc_1737833, AddrModeRel {
+tc_c8f9a6f6, TypeV2LDST>, Enc_585242, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b01000110101;
@@ -21792,8 +21873,8 @@ let addrMode = BaseImmOffset;
let accessSize = ByteAccess;
let isNVStore = 1;
let isPredicatedNew = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerb";
let InputType = "imm";
let BaseOpcode = "S2_storerb_io";
@@ -21808,7 +21889,7 @@ def S4_pstorerbnewfnew_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Nt8),
"if (!$Pv4.new) memb($Rs32+$Ru32<<#$Ii) = $Nt8.new",
-V4LDST_tc_st_SLOT0, TypeST>, Enc_11000933, AddrModeRel {
+tc_8def9c57, TypeST>, Enc_47ee5e, AddrModeRel {
let Inst{4-3} = 0b00;
let Inst{31-21} = 0b00110111101;
let isPredicated = 1;
@@ -21817,8 +21898,8 @@ let addrMode = BaseRegOffset;
let accessSize = ByteAccess;
let isNVStore = 1;
let isPredicatedNew = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerb";
let InputType = "reg";
let BaseOpcode = "S4_storerb_rr";
@@ -21828,7 +21909,7 @@ def S4_pstorerbnewfnew_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Nt8),
"if (!$Pv4.new) memb($Rs32) = $Nt8.new",
-PSEUDO, TypeMAPPING> {
+tc_c8f9a6f6, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
let opNewValue = 2;
@@ -21837,7 +21918,7 @@ def S4_pstorerbnewt_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Nt8),
"if ($Pv4) memb(#$Ii) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeST>, Enc_1774350, AddrModeRel {
+tc_2c8fe5ae, TypeST>, Enc_44215c, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b1;
let Inst{13-11} = 0b000;
@@ -21846,9 +21927,9 @@ let isPredicated = 1;
let addrMode = Absolute;
let accessSize = ByteAccess;
let isNVStore = 1;
+let isNewValue = 1;
let isExtended = 1;
let mayStore = 1;
-let isNewValue = 1;
let CextOpcode = "S2_storerb";
let BaseOpcode = "S2_storerbabs";
let DecoderNamespace = "MustExtend";
@@ -21863,15 +21944,15 @@ def S4_pstorerbnewt_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Nt8),
"if ($Pv4) memb($Rs32+$Ru32<<#$Ii) = $Nt8.new",
-V4LDST_tc_st_SLOT0, TypeST>, Enc_11000933, AddrModeRel {
+tc_77781686, TypeST>, Enc_47ee5e, AddrModeRel {
let Inst{4-3} = 0b00;
let Inst{31-21} = 0b00110100101;
let isPredicated = 1;
let addrMode = BaseRegOffset;
let accessSize = ByteAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerb";
let InputType = "reg";
let BaseOpcode = "S4_storerb_rr";
@@ -21881,7 +21962,7 @@ def S4_pstorerbnewtnew_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Nt8),
"if ($Pv4.new) memb(#$Ii) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeST>, Enc_1774350, AddrModeRel {
+tc_7986ba30, TypeST>, Enc_44215c, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b1;
let Inst{13-11} = 0b100;
@@ -21890,10 +21971,10 @@ let isPredicated = 1;
let addrMode = Absolute;
let accessSize = ByteAccess;
let isNVStore = 1;
-let isExtended = 1;
let isPredicatedNew = 1;
-let mayStore = 1;
let isNewValue = 1;
+let isExtended = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerb";
let BaseOpcode = "S2_storerbabs";
let DecoderNamespace = "MustExtend";
@@ -21908,7 +21989,7 @@ def S4_pstorerbnewtnew_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u32_0Imm:$Ii, IntRegs:$Nt8),
"if ($Pv4.new) memb($Rs32+#$Ii) = $Nt8.new",
-V2LDST_tc_st_SLOT0, TypeV2LDST>, Enc_1737833, AddrModeRel {
+tc_c8f9a6f6, TypeV2LDST>, Enc_585242, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b01000010101;
@@ -21917,8 +21998,8 @@ let addrMode = BaseImmOffset;
let accessSize = ByteAccess;
let isNVStore = 1;
let isPredicatedNew = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerb";
let InputType = "imm";
let BaseOpcode = "S2_storerb_io";
@@ -21933,7 +22014,7 @@ def S4_pstorerbnewtnew_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Nt8),
"if ($Pv4.new) memb($Rs32+$Ru32<<#$Ii) = $Nt8.new",
-V4LDST_tc_st_SLOT0, TypeST>, Enc_11000933, AddrModeRel {
+tc_8def9c57, TypeST>, Enc_47ee5e, AddrModeRel {
let Inst{4-3} = 0b00;
let Inst{31-21} = 0b00110110101;
let isPredicated = 1;
@@ -21941,8 +22022,8 @@ let addrMode = BaseRegOffset;
let accessSize = ByteAccess;
let isNVStore = 1;
let isPredicatedNew = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerb";
let InputType = "reg";
let BaseOpcode = "S4_storerb_rr";
@@ -21952,7 +22033,7 @@ def S4_pstorerbnewtnew_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Nt8),
"if ($Pv4.new) memb($Rs32) = $Nt8.new",
-PSEUDO, TypeMAPPING> {
+tc_c8f9a6f6, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
let opNewValue = 2;
@@ -21961,7 +22042,7 @@ def S4_pstorerbt_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4) memb(#$Ii) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_16657398, AddrModeRel {
+tc_c85212ca, TypeST>, Enc_1cf4ca, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b0;
@@ -21985,7 +22066,7 @@ def S4_pstorerbt_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4) memb($Rs32+$Ru32<<#$Ii) = $Rt32",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_11940513, AddrModeRel {
+tc_7bc567a7, TypeST>, Enc_6339d5, AddrModeRel {
let Inst{31-21} = 0b00110100000;
let isPredicated = 1;
let addrMode = BaseRegOffset;
@@ -22000,7 +22081,7 @@ def S4_pstorerbtnew_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4.new) memb(#$Ii) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_16657398, AddrModeRel {
+tc_336e698c, TypeST>, Enc_1cf4ca, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
@@ -22008,8 +22089,8 @@ let Inst{31-18} = 0b10101111000000;
let isPredicated = 1;
let addrMode = Absolute;
let accessSize = ByteAccess;
-let isExtended = 1;
let isPredicatedNew = 1;
+let isExtended = 1;
let mayStore = 1;
let CextOpcode = "S2_storerb";
let BaseOpcode = "S2_storerbabs";
@@ -22025,7 +22106,7 @@ def S4_pstorerbtnew_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u32_0Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4.new) memb($Rs32+#$Ii) = $Rt32",
-V2LDST_tc_st_SLOT01, TypeV2LDST>, Enc_14044877, AddrModeRel {
+tc_20a8e109, TypeV2LDST>, Enc_da8d43, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{31-21} = 0b01000010000;
let isPredicated = 1;
@@ -22047,7 +22128,7 @@ def S4_pstorerbtnew_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4.new) memb($Rs32+$Ru32<<#$Ii) = $Rt32",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_11940513, AddrModeRel {
+tc_7639d4b0, TypeST>, Enc_6339d5, AddrModeRel {
let Inst{31-21} = 0b00110110000;
let isPredicated = 1;
let addrMode = BaseRegOffset;
@@ -22063,7 +22144,7 @@ def S4_pstorerbtnew_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32),
"if ($Pv4.new) memb($Rs32) = $Rt32",
-PSEUDO, TypeMAPPING> {
+tc_20a8e109, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -22071,7 +22152,7 @@ def S4_pstorerdf_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, DoubleRegs:$Rtt32),
"if (!$Pv4) memd(#$Ii) = $Rtt32",
-ST_tc_st_SLOT01, TypeST>, Enc_13715847, AddrModeRel {
+tc_c85212ca, TypeST>, Enc_50b5ac, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b0;
@@ -22095,7 +22176,7 @@ def S4_pstorerdf_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, DoubleRegs:$Rtt32),
"if (!$Pv4) memd($Rs32+$Ru32<<#$Ii) = $Rtt32",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_9920336, AddrModeRel {
+tc_7bc567a7, TypeST>, Enc_1a9974, AddrModeRel {
let Inst{31-21} = 0b00110101110;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -22110,7 +22191,7 @@ def S4_pstorerdfnew_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, DoubleRegs:$Rtt32),
"if (!$Pv4.new) memd(#$Ii) = $Rtt32",
-ST_tc_st_SLOT01, TypeST>, Enc_13715847, AddrModeRel {
+tc_336e698c, TypeST>, Enc_50b5ac, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
@@ -22119,8 +22200,8 @@ let isPredicated = 1;
let isPredicatedFalse = 1;
let addrMode = Absolute;
let accessSize = DoubleWordAccess;
-let isExtended = 1;
let isPredicatedNew = 1;
+let isExtended = 1;
let mayStore = 1;
let CextOpcode = "S2_storerd";
let BaseOpcode = "S2_storerdabs";
@@ -22135,7 +22216,7 @@ def S4_pstorerdfnew_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u29_3Imm:$Ii, DoubleRegs:$Rtt32),
"if (!$Pv4.new) memd($Rs32+#$Ii) = $Rtt32",
-V2LDST_tc_st_SLOT01, TypeV2LDST>, Enc_11049656, AddrModeRel {
+tc_20a8e109, TypeV2LDST>, Enc_57a33e, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{31-21} = 0b01000110110;
let isPredicated = 1;
@@ -22157,7 +22238,7 @@ def S4_pstorerdfnew_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, DoubleRegs:$Rtt32),
"if (!$Pv4.new) memd($Rs32+$Ru32<<#$Ii) = $Rtt32",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_9920336, AddrModeRel {
+tc_7639d4b0, TypeST>, Enc_1a9974, AddrModeRel {
let Inst{31-21} = 0b00110111110;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -22173,7 +22254,7 @@ def S4_pstorerdfnew_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, DoubleRegs:$Rtt32),
"if (!$Pv4.new) memd($Rs32) = $Rtt32",
-PSEUDO, TypeMAPPING> {
+tc_20a8e109, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -22181,7 +22262,7 @@ def S4_pstorerdt_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, DoubleRegs:$Rtt32),
"if ($Pv4) memd(#$Ii) = $Rtt32",
-ST_tc_st_SLOT01, TypeST>, Enc_13715847, AddrModeRel {
+tc_c85212ca, TypeST>, Enc_50b5ac, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b0;
@@ -22204,7 +22285,7 @@ def S4_pstorerdt_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, DoubleRegs:$Rtt32),
"if ($Pv4) memd($Rs32+$Ru32<<#$Ii) = $Rtt32",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_9920336, AddrModeRel {
+tc_7bc567a7, TypeST>, Enc_1a9974, AddrModeRel {
let Inst{31-21} = 0b00110100110;
let isPredicated = 1;
let addrMode = BaseRegOffset;
@@ -22218,7 +22299,7 @@ def S4_pstorerdtnew_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, DoubleRegs:$Rtt32),
"if ($Pv4.new) memd(#$Ii) = $Rtt32",
-ST_tc_st_SLOT01, TypeST>, Enc_13715847, AddrModeRel {
+tc_336e698c, TypeST>, Enc_50b5ac, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
@@ -22226,8 +22307,8 @@ let Inst{31-18} = 0b10101111110000;
let isPredicated = 1;
let addrMode = Absolute;
let accessSize = DoubleWordAccess;
-let isExtended = 1;
let isPredicatedNew = 1;
+let isExtended = 1;
let mayStore = 1;
let CextOpcode = "S2_storerd";
let BaseOpcode = "S2_storerdabs";
@@ -22242,7 +22323,7 @@ def S4_pstorerdtnew_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u29_3Imm:$Ii, DoubleRegs:$Rtt32),
"if ($Pv4.new) memd($Rs32+#$Ii) = $Rtt32",
-V2LDST_tc_st_SLOT01, TypeV2LDST>, Enc_11049656, AddrModeRel {
+tc_20a8e109, TypeV2LDST>, Enc_57a33e, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{31-21} = 0b01000010110;
let isPredicated = 1;
@@ -22263,7 +22344,7 @@ def S4_pstorerdtnew_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, DoubleRegs:$Rtt32),
"if ($Pv4.new) memd($Rs32+$Ru32<<#$Ii) = $Rtt32",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_9920336, AddrModeRel {
+tc_7639d4b0, TypeST>, Enc_1a9974, AddrModeRel {
let Inst{31-21} = 0b00110110110;
let isPredicated = 1;
let addrMode = BaseRegOffset;
@@ -22278,7 +22359,7 @@ def S4_pstorerdtnew_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, DoubleRegs:$Rtt32),
"if ($Pv4.new) memd($Rs32) = $Rtt32",
-PSEUDO, TypeMAPPING> {
+tc_20a8e109, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -22286,7 +22367,7 @@ def S4_pstorerff_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4) memh(#$Ii) = $Rt32.h",
-ST_tc_st_SLOT01, TypeST>, Enc_16657398, AddrModeRel {
+tc_c85212ca, TypeST>, Enc_1cf4ca, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b0;
@@ -22310,7 +22391,7 @@ def S4_pstorerff_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4) memh($Rs32+$Ru32<<#$Ii) = $Rt32.h",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_11940513, AddrModeRel {
+tc_7bc567a7, TypeST>, Enc_6339d5, AddrModeRel {
let Inst{31-21} = 0b00110101011;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -22325,7 +22406,7 @@ def S4_pstorerffnew_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4.new) memh(#$Ii) = $Rt32.h",
-ST_tc_st_SLOT01, TypeST>, Enc_16657398, AddrModeRel {
+tc_336e698c, TypeST>, Enc_1cf4ca, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
@@ -22334,8 +22415,8 @@ let isPredicated = 1;
let isPredicatedFalse = 1;
let addrMode = Absolute;
let accessSize = HalfWordAccess;
-let isExtended = 1;
let isPredicatedNew = 1;
+let isExtended = 1;
let mayStore = 1;
let CextOpcode = "S2_storerf";
let BaseOpcode = "S2_storerfabs";
@@ -22350,7 +22431,7 @@ def S4_pstorerffnew_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u31_1Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4.new) memh($Rs32+#$Ii) = $Rt32.h",
-V2LDST_tc_st_SLOT01, TypeV2LDST>, Enc_10979813, AddrModeRel {
+tc_20a8e109, TypeV2LDST>, Enc_e8c45e, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{31-21} = 0b01000110011;
let isPredicated = 1;
@@ -22372,7 +22453,7 @@ def S4_pstorerffnew_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4.new) memh($Rs32+$Ru32<<#$Ii) = $Rt32.h",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_11940513, AddrModeRel {
+tc_7639d4b0, TypeST>, Enc_6339d5, AddrModeRel {
let Inst{31-21} = 0b00110111011;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -22388,7 +22469,7 @@ def S4_pstorerffnew_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32),
"if (!$Pv4.new) memh($Rs32) = $Rt32.h",
-PSEUDO, TypeMAPPING> {
+tc_20a8e109, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -22396,7 +22477,7 @@ def S4_pstorerft_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4) memh(#$Ii) = $Rt32.h",
-ST_tc_st_SLOT01, TypeST>, Enc_16657398, AddrModeRel {
+tc_c85212ca, TypeST>, Enc_1cf4ca, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b0;
@@ -22419,7 +22500,7 @@ def S4_pstorerft_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4) memh($Rs32+$Ru32<<#$Ii) = $Rt32.h",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_11940513, AddrModeRel {
+tc_7bc567a7, TypeST>, Enc_6339d5, AddrModeRel {
let Inst{31-21} = 0b00110100011;
let isPredicated = 1;
let addrMode = BaseRegOffset;
@@ -22433,7 +22514,7 @@ def S4_pstorerftnew_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4.new) memh(#$Ii) = $Rt32.h",
-ST_tc_st_SLOT01, TypeST>, Enc_16657398, AddrModeRel {
+tc_336e698c, TypeST>, Enc_1cf4ca, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
@@ -22441,8 +22522,8 @@ let Inst{31-18} = 0b10101111011000;
let isPredicated = 1;
let addrMode = Absolute;
let accessSize = HalfWordAccess;
-let isExtended = 1;
let isPredicatedNew = 1;
+let isExtended = 1;
let mayStore = 1;
let CextOpcode = "S2_storerf";
let BaseOpcode = "S2_storerfabs";
@@ -22457,7 +22538,7 @@ def S4_pstorerftnew_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u31_1Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4.new) memh($Rs32+#$Ii) = $Rt32.h",
-V2LDST_tc_st_SLOT01, TypeV2LDST>, Enc_10979813, AddrModeRel {
+tc_20a8e109, TypeV2LDST>, Enc_e8c45e, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{31-21} = 0b01000010011;
let isPredicated = 1;
@@ -22478,7 +22559,7 @@ def S4_pstorerftnew_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4.new) memh($Rs32+$Ru32<<#$Ii) = $Rt32.h",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_11940513, AddrModeRel {
+tc_7639d4b0, TypeST>, Enc_6339d5, AddrModeRel {
let Inst{31-21} = 0b00110110011;
let isPredicated = 1;
let addrMode = BaseRegOffset;
@@ -22493,7 +22574,7 @@ def S4_pstorerftnew_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32),
"if ($Pv4.new) memh($Rs32) = $Rt32.h",
-PSEUDO, TypeMAPPING> {
+tc_20a8e109, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -22501,7 +22582,7 @@ def S4_pstorerhf_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4) memh(#$Ii) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_16657398, AddrModeRel {
+tc_c85212ca, TypeST>, Enc_1cf4ca, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b0;
@@ -22526,7 +22607,7 @@ def S4_pstorerhf_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4) memh($Rs32+$Ru32<<#$Ii) = $Rt32",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_11940513, AddrModeRel {
+tc_7bc567a7, TypeST>, Enc_6339d5, AddrModeRel {
let Inst{31-21} = 0b00110101010;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -22542,7 +22623,7 @@ def S4_pstorerhfnew_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4.new) memh(#$Ii) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_16657398, AddrModeRel {
+tc_336e698c, TypeST>, Enc_1cf4ca, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
@@ -22551,8 +22632,8 @@ let isPredicated = 1;
let isPredicatedFalse = 1;
let addrMode = Absolute;
let accessSize = HalfWordAccess;
-let isExtended = 1;
let isPredicatedNew = 1;
+let isExtended = 1;
let mayStore = 1;
let CextOpcode = "S2_storerh";
let BaseOpcode = "S2_storerhabs";
@@ -22568,7 +22649,7 @@ def S4_pstorerhfnew_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u31_1Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4.new) memh($Rs32+#$Ii) = $Rt32",
-V2LDST_tc_st_SLOT01, TypeV2LDST>, Enc_10979813, AddrModeRel {
+tc_20a8e109, TypeV2LDST>, Enc_e8c45e, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{31-21} = 0b01000110010;
let isPredicated = 1;
@@ -22591,7 +22672,7 @@ def S4_pstorerhfnew_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4.new) memh($Rs32+$Ru32<<#$Ii) = $Rt32",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_11940513, AddrModeRel {
+tc_7639d4b0, TypeST>, Enc_6339d5, AddrModeRel {
let Inst{31-21} = 0b00110111010;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -22608,7 +22689,7 @@ def S4_pstorerhfnew_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32),
"if (!$Pv4.new) memh($Rs32) = $Rt32",
-PSEUDO, TypeMAPPING> {
+tc_20a8e109, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -22616,7 +22697,7 @@ def S4_pstorerhnewf_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Nt8),
"if (!$Pv4) memh(#$Ii) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeST>, Enc_1774350, AddrModeRel {
+tc_2c8fe5ae, TypeST>, Enc_44215c, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b1;
let Inst{13-11} = 0b001;
@@ -22626,9 +22707,9 @@ let isPredicatedFalse = 1;
let addrMode = Absolute;
let accessSize = HalfWordAccess;
let isNVStore = 1;
+let isNewValue = 1;
let isExtended = 1;
let mayStore = 1;
-let isNewValue = 1;
let CextOpcode = "S2_storerh";
let BaseOpcode = "S2_storerhabs";
let DecoderNamespace = "MustExtend";
@@ -22643,7 +22724,7 @@ def S4_pstorerhnewf_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Nt8),
"if (!$Pv4) memh($Rs32+$Ru32<<#$Ii) = $Nt8.new",
-V4LDST_tc_st_SLOT0, TypeST>, Enc_11000933, AddrModeRel {
+tc_77781686, TypeST>, Enc_47ee5e, AddrModeRel {
let Inst{4-3} = 0b01;
let Inst{31-21} = 0b00110101101;
let isPredicated = 1;
@@ -22651,8 +22732,8 @@ let isPredicatedFalse = 1;
let addrMode = BaseRegOffset;
let accessSize = HalfWordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerh";
let InputType = "reg";
let BaseOpcode = "S2_storerh_rr";
@@ -22662,7 +22743,7 @@ def S4_pstorerhnewfnew_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Nt8),
"if (!$Pv4.new) memh(#$Ii) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeST>, Enc_1774350, AddrModeRel {
+tc_7986ba30, TypeST>, Enc_44215c, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b1;
let Inst{13-11} = 0b101;
@@ -22672,10 +22753,10 @@ let isPredicatedFalse = 1;
let addrMode = Absolute;
let accessSize = HalfWordAccess;
let isNVStore = 1;
-let isExtended = 1;
let isPredicatedNew = 1;
-let mayStore = 1;
let isNewValue = 1;
+let isExtended = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerh";
let BaseOpcode = "S2_storerhabs";
let DecoderNamespace = "MustExtend";
@@ -22690,7 +22771,7 @@ def S4_pstorerhnewfnew_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u31_1Imm:$Ii, IntRegs:$Nt8),
"if (!$Pv4.new) memh($Rs32+#$Ii) = $Nt8.new",
-V2LDST_tc_st_SLOT0, TypeV2LDST>, Enc_6154421, AddrModeRel {
+tc_c8f9a6f6, TypeV2LDST>, Enc_f44229, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{12-11} = 0b01;
let Inst{31-21} = 0b01000110101;
@@ -22700,8 +22781,8 @@ let addrMode = BaseImmOffset;
let accessSize = HalfWordAccess;
let isNVStore = 1;
let isPredicatedNew = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerh";
let InputType = "imm";
let BaseOpcode = "S2_storerh_io";
@@ -22716,7 +22797,7 @@ def S4_pstorerhnewfnew_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Nt8),
"if (!$Pv4.new) memh($Rs32+$Ru32<<#$Ii) = $Nt8.new",
-V4LDST_tc_st_SLOT0, TypeST>, Enc_11000933, AddrModeRel {
+tc_8def9c57, TypeST>, Enc_47ee5e, AddrModeRel {
let Inst{4-3} = 0b01;
let Inst{31-21} = 0b00110111101;
let isPredicated = 1;
@@ -22725,8 +22806,8 @@ let addrMode = BaseRegOffset;
let accessSize = HalfWordAccess;
let isNVStore = 1;
let isPredicatedNew = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerh";
let InputType = "reg";
let BaseOpcode = "S2_storerh_rr";
@@ -22736,7 +22817,7 @@ def S4_pstorerhnewfnew_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Nt8),
"if (!$Pv4.new) memh($Rs32) = $Nt8.new",
-PSEUDO, TypeMAPPING> {
+tc_c8f9a6f6, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
let opNewValue = 2;
@@ -22745,7 +22826,7 @@ def S4_pstorerhnewt_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Nt8),
"if ($Pv4) memh(#$Ii) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeST>, Enc_1774350, AddrModeRel {
+tc_2c8fe5ae, TypeST>, Enc_44215c, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b1;
let Inst{13-11} = 0b001;
@@ -22754,9 +22835,9 @@ let isPredicated = 1;
let addrMode = Absolute;
let accessSize = HalfWordAccess;
let isNVStore = 1;
+let isNewValue = 1;
let isExtended = 1;
let mayStore = 1;
-let isNewValue = 1;
let CextOpcode = "S2_storerh";
let BaseOpcode = "S2_storerhabs";
let DecoderNamespace = "MustExtend";
@@ -22771,15 +22852,15 @@ def S4_pstorerhnewt_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Nt8),
"if ($Pv4) memh($Rs32+$Ru32<<#$Ii) = $Nt8.new",
-V4LDST_tc_st_SLOT0, TypeST>, Enc_11000933, AddrModeRel {
+tc_77781686, TypeST>, Enc_47ee5e, AddrModeRel {
let Inst{4-3} = 0b01;
let Inst{31-21} = 0b00110100101;
let isPredicated = 1;
let addrMode = BaseRegOffset;
let accessSize = HalfWordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerh";
let InputType = "reg";
let BaseOpcode = "S2_storerh_rr";
@@ -22789,7 +22870,7 @@ def S4_pstorerhnewtnew_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Nt8),
"if ($Pv4.new) memh(#$Ii) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeST>, Enc_1774350, AddrModeRel {
+tc_7986ba30, TypeST>, Enc_44215c, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b1;
let Inst{13-11} = 0b101;
@@ -22798,10 +22879,10 @@ let isPredicated = 1;
let addrMode = Absolute;
let accessSize = HalfWordAccess;
let isNVStore = 1;
-let isExtended = 1;
let isPredicatedNew = 1;
-let mayStore = 1;
let isNewValue = 1;
+let isExtended = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerh";
let BaseOpcode = "S2_storerhabs";
let DecoderNamespace = "MustExtend";
@@ -22816,7 +22897,7 @@ def S4_pstorerhnewtnew_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u31_1Imm:$Ii, IntRegs:$Nt8),
"if ($Pv4.new) memh($Rs32+#$Ii) = $Nt8.new",
-V2LDST_tc_st_SLOT0, TypeV2LDST>, Enc_6154421, AddrModeRel {
+tc_c8f9a6f6, TypeV2LDST>, Enc_f44229, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{12-11} = 0b01;
let Inst{31-21} = 0b01000010101;
@@ -22825,8 +22906,8 @@ let addrMode = BaseImmOffset;
let accessSize = HalfWordAccess;
let isNVStore = 1;
let isPredicatedNew = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerh";
let InputType = "imm";
let BaseOpcode = "S2_storerh_io";
@@ -22841,7 +22922,7 @@ def S4_pstorerhnewtnew_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Nt8),
"if ($Pv4.new) memh($Rs32+$Ru32<<#$Ii) = $Nt8.new",
-V4LDST_tc_st_SLOT0, TypeST>, Enc_11000933, AddrModeRel {
+tc_8def9c57, TypeST>, Enc_47ee5e, AddrModeRel {
let Inst{4-3} = 0b01;
let Inst{31-21} = 0b00110110101;
let isPredicated = 1;
@@ -22849,8 +22930,8 @@ let addrMode = BaseRegOffset;
let accessSize = HalfWordAccess;
let isNVStore = 1;
let isPredicatedNew = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerh";
let InputType = "reg";
let BaseOpcode = "S2_storerh_rr";
@@ -22860,7 +22941,7 @@ def S4_pstorerhnewtnew_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Nt8),
"if ($Pv4.new) memh($Rs32) = $Nt8.new",
-PSEUDO, TypeMAPPING> {
+tc_c8f9a6f6, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
let opNewValue = 2;
@@ -22869,7 +22950,7 @@ def S4_pstorerht_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4) memh(#$Ii) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_16657398, AddrModeRel {
+tc_c85212ca, TypeST>, Enc_1cf4ca, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b0;
@@ -22893,7 +22974,7 @@ def S4_pstorerht_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4) memh($Rs32+$Ru32<<#$Ii) = $Rt32",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_11940513, AddrModeRel {
+tc_7bc567a7, TypeST>, Enc_6339d5, AddrModeRel {
let Inst{31-21} = 0b00110100010;
let isPredicated = 1;
let addrMode = BaseRegOffset;
@@ -22908,7 +22989,7 @@ def S4_pstorerhtnew_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4.new) memh(#$Ii) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_16657398, AddrModeRel {
+tc_336e698c, TypeST>, Enc_1cf4ca, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
@@ -22916,8 +22997,8 @@ let Inst{31-18} = 0b10101111010000;
let isPredicated = 1;
let addrMode = Absolute;
let accessSize = HalfWordAccess;
-let isExtended = 1;
let isPredicatedNew = 1;
+let isExtended = 1;
let mayStore = 1;
let CextOpcode = "S2_storerh";
let BaseOpcode = "S2_storerhabs";
@@ -22933,7 +23014,7 @@ def S4_pstorerhtnew_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u31_1Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4.new) memh($Rs32+#$Ii) = $Rt32",
-V2LDST_tc_st_SLOT01, TypeV2LDST>, Enc_10979813, AddrModeRel {
+tc_20a8e109, TypeV2LDST>, Enc_e8c45e, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{31-21} = 0b01000010010;
let isPredicated = 1;
@@ -22955,7 +23036,7 @@ def S4_pstorerhtnew_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4.new) memh($Rs32+$Ru32<<#$Ii) = $Rt32",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_11940513, AddrModeRel {
+tc_7639d4b0, TypeST>, Enc_6339d5, AddrModeRel {
let Inst{31-21} = 0b00110110010;
let isPredicated = 1;
let addrMode = BaseRegOffset;
@@ -22971,7 +23052,7 @@ def S4_pstorerhtnew_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32),
"if ($Pv4.new) memh($Rs32) = $Rt32",
-PSEUDO, TypeMAPPING> {
+tc_20a8e109, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -22979,7 +23060,7 @@ def S4_pstorerif_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4) memw(#$Ii) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_16657398, AddrModeRel {
+tc_c85212ca, TypeST>, Enc_1cf4ca, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b0;
@@ -23004,7 +23085,7 @@ def S4_pstorerif_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4) memw($Rs32+$Ru32<<#$Ii) = $Rt32",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_11940513, AddrModeRel {
+tc_7bc567a7, TypeST>, Enc_6339d5, AddrModeRel {
let Inst{31-21} = 0b00110101100;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -23020,7 +23101,7 @@ def S4_pstorerifnew_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4.new) memw(#$Ii) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_16657398, AddrModeRel {
+tc_336e698c, TypeST>, Enc_1cf4ca, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
@@ -23029,8 +23110,8 @@ let isPredicated = 1;
let isPredicatedFalse = 1;
let addrMode = Absolute;
let accessSize = WordAccess;
-let isExtended = 1;
let isPredicatedNew = 1;
+let isExtended = 1;
let mayStore = 1;
let CextOpcode = "S2_storeri";
let BaseOpcode = "S2_storeriabs";
@@ -23046,7 +23127,7 @@ def S4_pstorerifnew_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u30_2Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4.new) memw($Rs32+#$Ii) = $Rt32",
-V2LDST_tc_st_SLOT01, TypeV2LDST>, Enc_8225953, AddrModeRel {
+tc_20a8e109, TypeV2LDST>, Enc_397f23, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{31-21} = 0b01000110100;
let isPredicated = 1;
@@ -23069,7 +23150,7 @@ def S4_pstorerifnew_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4.new) memw($Rs32+$Ru32<<#$Ii) = $Rt32",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_11940513, AddrModeRel {
+tc_7639d4b0, TypeST>, Enc_6339d5, AddrModeRel {
let Inst{31-21} = 0b00110111100;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -23086,7 +23167,7 @@ def S4_pstorerifnew_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32),
"if (!$Pv4.new) memw($Rs32) = $Rt32",
-PSEUDO, TypeMAPPING> {
+tc_20a8e109, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -23094,7 +23175,7 @@ def S4_pstorerinewf_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Nt8),
"if (!$Pv4) memw(#$Ii) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeST>, Enc_1774350, AddrModeRel {
+tc_2c8fe5ae, TypeST>, Enc_44215c, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b1;
let Inst{13-11} = 0b010;
@@ -23104,9 +23185,9 @@ let isPredicatedFalse = 1;
let addrMode = Absolute;
let accessSize = WordAccess;
let isNVStore = 1;
+let isNewValue = 1;
let isExtended = 1;
let mayStore = 1;
-let isNewValue = 1;
let CextOpcode = "S2_storeri";
let BaseOpcode = "S2_storeriabs";
let DecoderNamespace = "MustExtend";
@@ -23121,7 +23202,7 @@ def S4_pstorerinewf_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Nt8),
"if (!$Pv4) memw($Rs32+$Ru32<<#$Ii) = $Nt8.new",
-V4LDST_tc_st_SLOT0, TypeST>, Enc_11000933, AddrModeRel {
+tc_77781686, TypeST>, Enc_47ee5e, AddrModeRel {
let Inst{4-3} = 0b10;
let Inst{31-21} = 0b00110101101;
let isPredicated = 1;
@@ -23129,8 +23210,8 @@ let isPredicatedFalse = 1;
let addrMode = BaseRegOffset;
let accessSize = WordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storeri";
let InputType = "reg";
let BaseOpcode = "S2_storeri_rr";
@@ -23140,7 +23221,7 @@ def S4_pstorerinewfnew_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Nt8),
"if (!$Pv4.new) memw(#$Ii) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeST>, Enc_1774350, AddrModeRel {
+tc_7986ba30, TypeST>, Enc_44215c, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b1;
let Inst{13-11} = 0b110;
@@ -23150,10 +23231,10 @@ let isPredicatedFalse = 1;
let addrMode = Absolute;
let accessSize = WordAccess;
let isNVStore = 1;
-let isExtended = 1;
let isPredicatedNew = 1;
-let mayStore = 1;
let isNewValue = 1;
+let isExtended = 1;
+let mayStore = 1;
let CextOpcode = "S2_storeri";
let BaseOpcode = "S2_storeriabs";
let DecoderNamespace = "MustExtend";
@@ -23168,7 +23249,7 @@ def S4_pstorerinewfnew_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u30_2Imm:$Ii, IntRegs:$Nt8),
"if (!$Pv4.new) memw($Rs32+#$Ii) = $Nt8.new",
-V2LDST_tc_st_SLOT0, TypeV2LDST>, Enc_11224149, AddrModeRel {
+tc_c8f9a6f6, TypeV2LDST>, Enc_8dbdfe, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{12-11} = 0b10;
let Inst{31-21} = 0b01000110101;
@@ -23178,8 +23259,8 @@ let addrMode = BaseImmOffset;
let accessSize = WordAccess;
let isNVStore = 1;
let isPredicatedNew = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storeri";
let InputType = "imm";
let BaseOpcode = "S2_storeri_io";
@@ -23194,7 +23275,7 @@ def S4_pstorerinewfnew_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Nt8),
"if (!$Pv4.new) memw($Rs32+$Ru32<<#$Ii) = $Nt8.new",
-V4LDST_tc_st_SLOT0, TypeST>, Enc_11000933, AddrModeRel {
+tc_8def9c57, TypeST>, Enc_47ee5e, AddrModeRel {
let Inst{4-3} = 0b10;
let Inst{31-21} = 0b00110111101;
let isPredicated = 1;
@@ -23203,8 +23284,8 @@ let addrMode = BaseRegOffset;
let accessSize = WordAccess;
let isNVStore = 1;
let isPredicatedNew = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storeri";
let InputType = "reg";
let BaseOpcode = "S2_storeri_rr";
@@ -23214,7 +23295,7 @@ def S4_pstorerinewfnew_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Nt8),
"if (!$Pv4.new) memw($Rs32) = $Nt8.new",
-PSEUDO, TypeMAPPING> {
+tc_c8f9a6f6, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
let opNewValue = 2;
@@ -23223,7 +23304,7 @@ def S4_pstorerinewt_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Nt8),
"if ($Pv4) memw(#$Ii) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeST>, Enc_1774350, AddrModeRel {
+tc_2c8fe5ae, TypeST>, Enc_44215c, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b1;
let Inst{13-11} = 0b010;
@@ -23232,9 +23313,9 @@ let isPredicated = 1;
let addrMode = Absolute;
let accessSize = WordAccess;
let isNVStore = 1;
+let isNewValue = 1;
let isExtended = 1;
let mayStore = 1;
-let isNewValue = 1;
let CextOpcode = "S2_storeri";
let BaseOpcode = "S2_storeriabs";
let DecoderNamespace = "MustExtend";
@@ -23249,15 +23330,15 @@ def S4_pstorerinewt_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Nt8),
"if ($Pv4) memw($Rs32+$Ru32<<#$Ii) = $Nt8.new",
-V4LDST_tc_st_SLOT0, TypeST>, Enc_11000933, AddrModeRel {
+tc_77781686, TypeST>, Enc_47ee5e, AddrModeRel {
let Inst{4-3} = 0b10;
let Inst{31-21} = 0b00110100101;
let isPredicated = 1;
let addrMode = BaseRegOffset;
let accessSize = WordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storeri";
let InputType = "reg";
let BaseOpcode = "S2_storeri_rr";
@@ -23267,7 +23348,7 @@ def S4_pstorerinewtnew_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Nt8),
"if ($Pv4.new) memw(#$Ii) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeST>, Enc_1774350, AddrModeRel {
+tc_7986ba30, TypeST>, Enc_44215c, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b1;
let Inst{13-11} = 0b110;
@@ -23276,10 +23357,10 @@ let isPredicated = 1;
let addrMode = Absolute;
let accessSize = WordAccess;
let isNVStore = 1;
-let isExtended = 1;
let isPredicatedNew = 1;
-let mayStore = 1;
let isNewValue = 1;
+let isExtended = 1;
+let mayStore = 1;
let CextOpcode = "S2_storeri";
let BaseOpcode = "S2_storeriabs";
let DecoderNamespace = "MustExtend";
@@ -23294,7 +23375,7 @@ def S4_pstorerinewtnew_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u30_2Imm:$Ii, IntRegs:$Nt8),
"if ($Pv4.new) memw($Rs32+#$Ii) = $Nt8.new",
-V2LDST_tc_st_SLOT0, TypeV2LDST>, Enc_11224149, AddrModeRel {
+tc_c8f9a6f6, TypeV2LDST>, Enc_8dbdfe, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{12-11} = 0b10;
let Inst{31-21} = 0b01000010101;
@@ -23303,8 +23384,8 @@ let addrMode = BaseImmOffset;
let accessSize = WordAccess;
let isNVStore = 1;
let isPredicatedNew = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storeri";
let InputType = "imm";
let BaseOpcode = "S2_storeri_io";
@@ -23319,7 +23400,7 @@ def S4_pstorerinewtnew_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Nt8),
"if ($Pv4.new) memw($Rs32+$Ru32<<#$Ii) = $Nt8.new",
-V4LDST_tc_st_SLOT0, TypeST>, Enc_11000933, AddrModeRel {
+tc_8def9c57, TypeST>, Enc_47ee5e, AddrModeRel {
let Inst{4-3} = 0b10;
let Inst{31-21} = 0b00110110101;
let isPredicated = 1;
@@ -23327,8 +23408,8 @@ let addrMode = BaseRegOffset;
let accessSize = WordAccess;
let isNVStore = 1;
let isPredicatedNew = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storeri";
let InputType = "reg";
let BaseOpcode = "S2_storeri_rr";
@@ -23338,7 +23419,7 @@ def S4_pstorerinewtnew_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Nt8),
"if ($Pv4.new) memw($Rs32) = $Nt8.new",
-PSEUDO, TypeMAPPING> {
+tc_c8f9a6f6, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
let opNewValue = 2;
@@ -23347,7 +23428,7 @@ def S4_pstorerit_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4) memw(#$Ii) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_16657398, AddrModeRel {
+tc_c85212ca, TypeST>, Enc_1cf4ca, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b0;
@@ -23371,7 +23452,7 @@ def S4_pstorerit_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4) memw($Rs32+$Ru32<<#$Ii) = $Rt32",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_11940513, AddrModeRel {
+tc_7bc567a7, TypeST>, Enc_6339d5, AddrModeRel {
let Inst{31-21} = 0b00110100100;
let isPredicated = 1;
let addrMode = BaseRegOffset;
@@ -23386,7 +23467,7 @@ def S4_pstoreritnew_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4.new) memw(#$Ii) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_16657398, AddrModeRel {
+tc_336e698c, TypeST>, Enc_1cf4ca, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
@@ -23394,8 +23475,8 @@ let Inst{31-18} = 0b10101111100000;
let isPredicated = 1;
let addrMode = Absolute;
let accessSize = WordAccess;
-let isExtended = 1;
let isPredicatedNew = 1;
+let isExtended = 1;
let mayStore = 1;
let CextOpcode = "S2_storeri";
let BaseOpcode = "S2_storeriabs";
@@ -23411,7 +23492,7 @@ def S4_pstoreritnew_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u30_2Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4.new) memw($Rs32+#$Ii) = $Rt32",
-V2LDST_tc_st_SLOT01, TypeV2LDST>, Enc_8225953, AddrModeRel {
+tc_20a8e109, TypeV2LDST>, Enc_397f23, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{31-21} = 0b01000010100;
let isPredicated = 1;
@@ -23433,7 +23514,7 @@ def S4_pstoreritnew_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4.new) memw($Rs32+$Ru32<<#$Ii) = $Rt32",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_11940513, AddrModeRel {
+tc_7639d4b0, TypeST>, Enc_6339d5, AddrModeRel {
let Inst{31-21} = 0b00110110100;
let isPredicated = 1;
let addrMode = BaseRegOffset;
@@ -23449,7 +23530,7 @@ def S4_pstoreritnew_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32),
"if ($Pv4.new) memw($Rs32) = $Rt32",
-PSEUDO, TypeMAPPING> {
+tc_20a8e109, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -23457,20 +23538,20 @@ def S4_stored_locked : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, DoubleRegs:$Rtt32),
"memd_locked($Rs32,$Pd4) = $Rtt32",
-ST_tc_ld_SLOT0, TypeST>, Enc_2921694 {
+tc_7d01cbdc, TypeST>, Enc_d7dc10 {
let Inst{7-2} = 0b000000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10100000111;
let accessSize = DoubleWordAccess;
+let isPredicateLate = 1;
let isSoloAX = 1;
let mayStore = 1;
-let isPredicateLate = 1;
}
def S4_storeirb_io : HInst<
(outs),
(ins IntRegs:$Rs32, u6_0Imm:$Ii, s32_0Imm:$II),
"memb($Rs32+#$Ii) = #$II",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_11282123, PredNewRel {
+tc_fcee8723, TypeST>, Enc_8203bb, PredNewRel {
let Inst{31-21} = 0b00111100000;
let addrMode = BaseImmOffset;
let accessSize = ByteAccess;
@@ -23489,7 +23570,7 @@ def S4_storeirb_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, s8_0Imm:$II),
"memb($Rs32) = #$II",
-PSEUDO, TypeMAPPING> {
+tc_fcee8723, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -23497,7 +23578,7 @@ def S4_storeirbf_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u6_0Imm:$Ii, s32_0Imm:$II),
"if (!$Pv4) memb($Rs32+#$Ii) = #$II",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_5967898, PredNewRel {
+tc_1e69aa99, TypeST>, Enc_d7a65e, PredNewRel {
let Inst{31-21} = 0b00111000100;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -23517,7 +23598,7 @@ def S4_storeirbf_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, s6_0Imm:$II),
"if (!$Pv4) memb($Rs32) = #$II",
-PSEUDO, TypeMAPPING> {
+tc_1e69aa99, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -23525,7 +23606,7 @@ def S4_storeirbfnew_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u6_0Imm:$Ii, s32_0Imm:$II),
"if (!$Pv4.new) memb($Rs32+#$Ii) = #$II",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_5967898, PredNewRel {
+tc_8f0a6bad, TypeST>, Enc_d7a65e, PredNewRel {
let Inst{31-21} = 0b00111001100;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -23546,7 +23627,7 @@ def S4_storeirbfnew_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, s6_0Imm:$II),
"if (!$Pv4.new) memb($Rs32) = #$II",
-PSEUDO, TypeMAPPING> {
+tc_8f0a6bad, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -23554,7 +23635,7 @@ def S4_storeirbt_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u6_0Imm:$Ii, s32_0Imm:$II),
"if ($Pv4) memb($Rs32+#$Ii) = #$II",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_5967898, PredNewRel {
+tc_1e69aa99, TypeST>, Enc_d7a65e, PredNewRel {
let Inst{31-21} = 0b00111000000;
let isPredicated = 1;
let addrMode = BaseImmOffset;
@@ -23573,7 +23654,7 @@ def S4_storeirbt_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, s6_0Imm:$II),
"if ($Pv4) memb($Rs32) = #$II",
-PSEUDO, TypeMAPPING> {
+tc_1e69aa99, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -23581,7 +23662,7 @@ def S4_storeirbtnew_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u6_0Imm:$Ii, s32_0Imm:$II),
"if ($Pv4.new) memb($Rs32+#$Ii) = #$II",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_5967898, PredNewRel {
+tc_8f0a6bad, TypeST>, Enc_d7a65e, PredNewRel {
let Inst{31-21} = 0b00111001000;
let isPredicated = 1;
let addrMode = BaseImmOffset;
@@ -23601,7 +23682,7 @@ def S4_storeirbtnew_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, s6_0Imm:$II),
"if ($Pv4.new) memb($Rs32) = #$II",
-PSEUDO, TypeMAPPING> {
+tc_8f0a6bad, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -23609,7 +23690,7 @@ def S4_storeirh_io : HInst<
(outs),
(ins IntRegs:$Rs32, u6_1Imm:$Ii, s32_0Imm:$II),
"memh($Rs32+#$Ii) = #$II",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_10282127, PredNewRel {
+tc_fcee8723, TypeST>, Enc_a803e0, PredNewRel {
let Inst{31-21} = 0b00111100001;
let addrMode = BaseImmOffset;
let accessSize = HalfWordAccess;
@@ -23628,7 +23709,7 @@ def S4_storeirh_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, s8_0Imm:$II),
"memh($Rs32) = #$II",
-PSEUDO, TypeMAPPING> {
+tc_fcee8723, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -23636,7 +23717,7 @@ def S4_storeirhf_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u6_1Imm:$Ii, s32_0Imm:$II),
"if (!$Pv4) memh($Rs32+#$Ii) = #$II",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_4967902, PredNewRel {
+tc_1e69aa99, TypeST>, Enc_f20719, PredNewRel {
let Inst{31-21} = 0b00111000101;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -23656,7 +23737,7 @@ def S4_storeirhf_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, s6_0Imm:$II),
"if (!$Pv4) memh($Rs32) = #$II",
-PSEUDO, TypeMAPPING> {
+tc_1e69aa99, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -23664,7 +23745,7 @@ def S4_storeirhfnew_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u6_1Imm:$Ii, s32_0Imm:$II),
"if (!$Pv4.new) memh($Rs32+#$Ii) = #$II",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_4967902, PredNewRel {
+tc_8f0a6bad, TypeST>, Enc_f20719, PredNewRel {
let Inst{31-21} = 0b00111001101;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -23685,7 +23766,7 @@ def S4_storeirhfnew_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, s6_0Imm:$II),
"if (!$Pv4.new) memh($Rs32) = #$II",
-PSEUDO, TypeMAPPING> {
+tc_8f0a6bad, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -23693,7 +23774,7 @@ def S4_storeirht_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u6_1Imm:$Ii, s32_0Imm:$II),
"if ($Pv4) memh($Rs32+#$Ii) = #$II",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_4967902, PredNewRel {
+tc_1e69aa99, TypeST>, Enc_f20719, PredNewRel {
let Inst{31-21} = 0b00111000001;
let isPredicated = 1;
let addrMode = BaseImmOffset;
@@ -23712,7 +23793,7 @@ def S4_storeirht_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, s6_0Imm:$II),
"if ($Pv4) memh($Rs32) = #$II",
-PSEUDO, TypeMAPPING> {
+tc_1e69aa99, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -23720,7 +23801,7 @@ def S4_storeirhtnew_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u6_1Imm:$Ii, s32_0Imm:$II),
"if ($Pv4.new) memh($Rs32+#$Ii) = #$II",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_4967902, PredNewRel {
+tc_8f0a6bad, TypeST>, Enc_f20719, PredNewRel {
let Inst{31-21} = 0b00111001001;
let isPredicated = 1;
let addrMode = BaseImmOffset;
@@ -23740,7 +23821,7 @@ def S4_storeirhtnew_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, s6_0Imm:$II),
"if ($Pv4.new) memh($Rs32) = #$II",
-PSEUDO, TypeMAPPING> {
+tc_8f0a6bad, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -23748,7 +23829,7 @@ def S4_storeiri_io : HInst<
(outs),
(ins IntRegs:$Rs32, u6_2Imm:$Ii, s32_0Imm:$II),
"memw($Rs32+#$Ii) = #$II",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_9282127, PredNewRel {
+tc_fcee8723, TypeST>, Enc_f37377, PredNewRel {
let Inst{31-21} = 0b00111100010;
let addrMode = BaseImmOffset;
let accessSize = WordAccess;
@@ -23767,7 +23848,7 @@ def S4_storeiri_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, s8_0Imm:$II),
"memw($Rs32) = #$II",
-PSEUDO, TypeMAPPING> {
+tc_fcee8723, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -23775,7 +23856,7 @@ def S4_storeirif_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u6_2Imm:$Ii, s32_0Imm:$II),
"if (!$Pv4) memw($Rs32+#$Ii) = #$II",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_3967902, PredNewRel {
+tc_1e69aa99, TypeST>, Enc_5ccba9, PredNewRel {
let Inst{31-21} = 0b00111000110;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -23795,7 +23876,7 @@ def S4_storeirif_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, s6_0Imm:$II),
"if (!$Pv4) memw($Rs32) = #$II",
-PSEUDO, TypeMAPPING> {
+tc_1e69aa99, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -23803,7 +23884,7 @@ def S4_storeirifnew_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u6_2Imm:$Ii, s32_0Imm:$II),
"if (!$Pv4.new) memw($Rs32+#$Ii) = #$II",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_3967902, PredNewRel {
+tc_8f0a6bad, TypeST>, Enc_5ccba9, PredNewRel {
let Inst{31-21} = 0b00111001110;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -23824,7 +23905,7 @@ def S4_storeirifnew_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, s6_0Imm:$II),
"if (!$Pv4.new) memw($Rs32) = #$II",
-PSEUDO, TypeMAPPING> {
+tc_8f0a6bad, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -23832,7 +23913,7 @@ def S4_storeirit_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u6_2Imm:$Ii, s32_0Imm:$II),
"if ($Pv4) memw($Rs32+#$Ii) = #$II",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_3967902, PredNewRel {
+tc_1e69aa99, TypeST>, Enc_5ccba9, PredNewRel {
let Inst{31-21} = 0b00111000010;
let isPredicated = 1;
let addrMode = BaseImmOffset;
@@ -23851,7 +23932,7 @@ def S4_storeirit_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, s6_0Imm:$II),
"if ($Pv4) memw($Rs32) = #$II",
-PSEUDO, TypeMAPPING> {
+tc_1e69aa99, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -23859,7 +23940,7 @@ def S4_storeiritnew_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u6_2Imm:$Ii, s32_0Imm:$II),
"if ($Pv4.new) memw($Rs32+#$Ii) = #$II",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_3967902, PredNewRel {
+tc_8f0a6bad, TypeST>, Enc_5ccba9, PredNewRel {
let Inst{31-21} = 0b00111001010;
let isPredicated = 1;
let addrMode = BaseImmOffset;
@@ -23879,7 +23960,7 @@ def S4_storeiritnew_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, s6_0Imm:$II),
"if ($Pv4.new) memw($Rs32) = #$II",
-PSEUDO, TypeMAPPING> {
+tc_8f0a6bad, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -23887,7 +23968,7 @@ def S4_storerb_ap : HInst<
(outs IntRegs:$Re32),
(ins u32_0Imm:$II, IntRegs:$Rt32),
"memb($Re32=#$II) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_11477246, AddrModeRel {
+tc_336e698c, TypeST>, Enc_8bcba4, AddrModeRel {
let Inst{7-6} = 0b10;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10101011000;
@@ -23910,7 +23991,7 @@ def S4_storerb_rr : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Rt32),
"memb($Rs32+$Ru32<<#$Ii) = $Rt32",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_14046916, AddrModeRel, ImmRegShl {
+tc_45631a8d, TypeST>, Enc_eca7c8, AddrModeRel, ImmRegShl {
let Inst{6-5} = 0b00;
let Inst{31-21} = 0b00111011000;
let addrMode = BaseRegOffset;
@@ -23926,7 +24007,7 @@ def S4_storerb_ur : HInst<
(outs),
(ins IntRegs:$Ru32, u2_0Imm:$Ii, u32_0Imm:$II, IntRegs:$Rt32),
"memb($Ru32<<#$Ii+#$II) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_14689096, AddrModeRel, ImmRegShl {
+tc_a4567c39, TypeST>, Enc_9ea4cf, AddrModeRel, ImmRegShl {
let Inst{7-7} = 0b1;
let Inst{31-21} = 0b10101101000;
let addrMode = BaseLongOffset;
@@ -23948,7 +24029,7 @@ def S4_storerbnew_ap : HInst<
(outs IntRegs:$Re32),
(ins u32_0Imm:$II, IntRegs:$Nt8),
"memb($Re32=#$II) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeST>, Enc_14193700, AddrModeRel {
+tc_7986ba30, TypeST>, Enc_724154, AddrModeRel {
let Inst{7-6} = 0b10;
let Inst{13-11} = 0b000;
let Inst{31-21} = 0b10101011101;
@@ -23957,9 +24038,9 @@ let opNewValue = 0;
let addrMode = AbsoluteSet;
let accessSize = ByteAccess;
let isNVStore = 1;
+let isNewValue = 1;
let isExtended = 1;
let mayStore = 1;
-let isNewValue = 1;
let BaseOpcode = "S2_storerb_ap";
let DecoderNamespace = "MustExtend";
let isExtendable = 1;
@@ -23973,14 +24054,14 @@ def S4_storerbnew_rr : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Nt8),
"memb($Rs32+$Ru32<<#$Ii) = $Nt8.new",
-V4LDST_tc_st_SLOT0, TypeST>, Enc_5486172, AddrModeRel {
+tc_be995eaf, TypeST>, Enc_c6220b, AddrModeRel {
let Inst{6-3} = 0b0000;
let Inst{31-21} = 0b00111011101;
let addrMode = BaseRegOffset;
let accessSize = ByteAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerb";
let InputType = "reg";
let BaseOpcode = "S4_storerb_rr";
@@ -23991,16 +24072,16 @@ def S4_storerbnew_ur : HInst<
(outs),
(ins IntRegs:$Ru32, u2_0Imm:$Ii, u32_0Imm:$II, IntRegs:$Nt8),
"memb($Ru32<<#$Ii+#$II) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeST>, Enc_10076500, AddrModeRel {
+tc_210b2456, TypeST>, Enc_7eb485, AddrModeRel {
let Inst{7-7} = 0b1;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b10101101101;
let addrMode = BaseLongOffset;
let accessSize = ByteAccess;
let isNVStore = 1;
+let isNewValue = 1;
let isExtended = 1;
let mayStore = 1;
-let isNewValue = 1;
let CextOpcode = "S2_storerb";
let BaseOpcode = "S4_storerb_ur";
let DecoderNamespace = "MustExtend";
@@ -24015,7 +24096,7 @@ def S4_storerd_ap : HInst<
(outs IntRegs:$Re32),
(ins u32_0Imm:$II, DoubleRegs:$Rtt32),
"memd($Re32=#$II) = $Rtt32",
-ST_tc_st_SLOT01, TypeST>, Enc_8131399 {
+tc_336e698c, TypeST>, Enc_c7a204 {
let Inst{7-6} = 0b10;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10101011110;
@@ -24037,7 +24118,7 @@ def S4_storerd_rr : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, DoubleRegs:$Rtt32),
"memd($Rs32+$Ru32<<#$Ii) = $Rtt32",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_9772987, AddrModeRel, ImmRegShl {
+tc_45631a8d, TypeST>, Enc_55355c, AddrModeRel, ImmRegShl {
let Inst{6-5} = 0b00;
let Inst{31-21} = 0b00111011110;
let addrMode = BaseRegOffset;
@@ -24052,7 +24133,7 @@ def S4_storerd_ur : HInst<
(outs),
(ins IntRegs:$Ru32, u2_0Imm:$Ii, u32_0Imm:$II, DoubleRegs:$Rtt32),
"memd($Ru32<<#$Ii+#$II) = $Rtt32",
-ST_tc_st_SLOT01, TypeST>, Enc_12848507, AddrModeRel, ImmRegShl {
+tc_a4567c39, TypeST>, Enc_f79415, AddrModeRel, ImmRegShl {
let Inst{7-7} = 0b1;
let Inst{31-21} = 0b10101101110;
let addrMode = BaseLongOffset;
@@ -24073,7 +24154,7 @@ def S4_storerf_ap : HInst<
(outs IntRegs:$Re32),
(ins u32_0Imm:$II, IntRegs:$Rt32),
"memh($Re32=#$II) = $Rt32.h",
-ST_tc_st_SLOT01, TypeST>, Enc_11477246 {
+tc_336e698c, TypeST>, Enc_8bcba4 {
let Inst{7-6} = 0b10;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10101011011;
@@ -24095,7 +24176,7 @@ def S4_storerf_rr : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Rt32),
"memh($Rs32+$Ru32<<#$Ii) = $Rt32.h",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_14046916, AddrModeRel, ImmRegShl {
+tc_45631a8d, TypeST>, Enc_eca7c8, AddrModeRel, ImmRegShl {
let Inst{6-5} = 0b00;
let Inst{31-21} = 0b00111011011;
let addrMode = BaseRegOffset;
@@ -24110,7 +24191,7 @@ def S4_storerf_ur : HInst<
(outs),
(ins IntRegs:$Ru32, u2_0Imm:$Ii, u32_0Imm:$II, IntRegs:$Rt32),
"memh($Ru32<<#$Ii+#$II) = $Rt32.h",
-ST_tc_st_SLOT01, TypeST>, Enc_14689096, AddrModeRel, ImmRegShl {
+tc_a4567c39, TypeST>, Enc_9ea4cf, AddrModeRel, ImmRegShl {
let Inst{7-7} = 0b1;
let Inst{31-21} = 0b10101101011;
let addrMode = BaseLongOffset;
@@ -24131,7 +24212,7 @@ def S4_storerh_ap : HInst<
(outs IntRegs:$Re32),
(ins u32_0Imm:$II, IntRegs:$Rt32),
"memh($Re32=#$II) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_11477246, AddrModeRel {
+tc_336e698c, TypeST>, Enc_8bcba4, AddrModeRel {
let Inst{7-6} = 0b10;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10101011010;
@@ -24154,7 +24235,7 @@ def S4_storerh_rr : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Rt32),
"memh($Rs32+$Ru32<<#$Ii) = $Rt32",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_14046916, AddrModeRel, ImmRegShl {
+tc_45631a8d, TypeST>, Enc_eca7c8, AddrModeRel, ImmRegShl {
let Inst{6-5} = 0b00;
let Inst{31-21} = 0b00111011010;
let addrMode = BaseRegOffset;
@@ -24170,7 +24251,7 @@ def S4_storerh_ur : HInst<
(outs),
(ins IntRegs:$Ru32, u2_0Imm:$Ii, u32_0Imm:$II, IntRegs:$Rt32),
"memh($Ru32<<#$Ii+#$II) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_14689096, AddrModeRel, ImmRegShl {
+tc_a4567c39, TypeST>, Enc_9ea4cf, AddrModeRel, ImmRegShl {
let Inst{7-7} = 0b1;
let Inst{31-21} = 0b10101101010;
let addrMode = BaseLongOffset;
@@ -24192,7 +24273,7 @@ def S4_storerhnew_ap : HInst<
(outs IntRegs:$Re32),
(ins u32_0Imm:$II, IntRegs:$Nt8),
"memh($Re32=#$II) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeST>, Enc_14193700, AddrModeRel {
+tc_7986ba30, TypeST>, Enc_724154, AddrModeRel {
let Inst{7-6} = 0b10;
let Inst{13-11} = 0b001;
let Inst{31-21} = 0b10101011101;
@@ -24201,9 +24282,9 @@ let opNewValue = 0;
let addrMode = AbsoluteSet;
let accessSize = HalfWordAccess;
let isNVStore = 1;
+let isNewValue = 1;
let isExtended = 1;
let mayStore = 1;
-let isNewValue = 1;
let BaseOpcode = "S2_storerh_ap";
let DecoderNamespace = "MustExtend";
let isExtendable = 1;
@@ -24217,14 +24298,14 @@ def S4_storerhnew_rr : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Nt8),
"memh($Rs32+$Ru32<<#$Ii) = $Nt8.new",
-V4LDST_tc_st_SLOT0, TypeST>, Enc_5486172, AddrModeRel {
+tc_be995eaf, TypeST>, Enc_c6220b, AddrModeRel {
let Inst{6-3} = 0b0001;
let Inst{31-21} = 0b00111011101;
let addrMode = BaseRegOffset;
let accessSize = HalfWordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerh";
let InputType = "reg";
let BaseOpcode = "S2_storerh_rr";
@@ -24235,16 +24316,16 @@ def S4_storerhnew_ur : HInst<
(outs),
(ins IntRegs:$Ru32, u2_0Imm:$Ii, u32_0Imm:$II, IntRegs:$Nt8),
"memh($Ru32<<#$Ii+#$II) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeST>, Enc_10076500, AddrModeRel {
+tc_210b2456, TypeST>, Enc_7eb485, AddrModeRel {
let Inst{7-7} = 0b1;
let Inst{12-11} = 0b01;
let Inst{31-21} = 0b10101101101;
let addrMode = BaseLongOffset;
let accessSize = HalfWordAccess;
let isNVStore = 1;
+let isNewValue = 1;
let isExtended = 1;
let mayStore = 1;
-let isNewValue = 1;
let CextOpcode = "S2_storerh";
let BaseOpcode = "S2_storerh_ur";
let DecoderNamespace = "MustExtend";
@@ -24259,7 +24340,7 @@ def S4_storeri_ap : HInst<
(outs IntRegs:$Re32),
(ins u32_0Imm:$II, IntRegs:$Rt32),
"memw($Re32=#$II) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_11477246, AddrModeRel {
+tc_336e698c, TypeST>, Enc_8bcba4, AddrModeRel {
let Inst{7-6} = 0b10;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10101011100;
@@ -24282,7 +24363,7 @@ def S4_storeri_rr : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Rt32),
"memw($Rs32+$Ru32<<#$Ii) = $Rt32",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_14046916, AddrModeRel, ImmRegShl {
+tc_45631a8d, TypeST>, Enc_eca7c8, AddrModeRel, ImmRegShl {
let Inst{6-5} = 0b00;
let Inst{31-21} = 0b00111011100;
let addrMode = BaseRegOffset;
@@ -24298,7 +24379,7 @@ def S4_storeri_ur : HInst<
(outs),
(ins IntRegs:$Ru32, u2_0Imm:$Ii, u32_0Imm:$II, IntRegs:$Rt32),
"memw($Ru32<<#$Ii+#$II) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_14689096, AddrModeRel, ImmRegShl {
+tc_a4567c39, TypeST>, Enc_9ea4cf, AddrModeRel, ImmRegShl {
let Inst{7-7} = 0b1;
let Inst{31-21} = 0b10101101100;
let addrMode = BaseLongOffset;
@@ -24320,7 +24401,7 @@ def S4_storerinew_ap : HInst<
(outs IntRegs:$Re32),
(ins u32_0Imm:$II, IntRegs:$Nt8),
"memw($Re32=#$II) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeST>, Enc_14193700, AddrModeRel {
+tc_7986ba30, TypeST>, Enc_724154, AddrModeRel {
let Inst{7-6} = 0b10;
let Inst{13-11} = 0b010;
let Inst{31-21} = 0b10101011101;
@@ -24329,9 +24410,9 @@ let opNewValue = 0;
let addrMode = AbsoluteSet;
let accessSize = WordAccess;
let isNVStore = 1;
+let isNewValue = 1;
let isExtended = 1;
let mayStore = 1;
-let isNewValue = 1;
let BaseOpcode = "S2_storeri_ap";
let DecoderNamespace = "MustExtend";
let isExtendable = 1;
@@ -24345,14 +24426,14 @@ def S4_storerinew_rr : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Nt8),
"memw($Rs32+$Ru32<<#$Ii) = $Nt8.new",
-V4LDST_tc_st_SLOT0, TypeST>, Enc_5486172, AddrModeRel {
+tc_be995eaf, TypeST>, Enc_c6220b, AddrModeRel {
let Inst{6-3} = 0b0010;
let Inst{31-21} = 0b00111011101;
let addrMode = BaseRegOffset;
let accessSize = WordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storeri";
let InputType = "reg";
let BaseOpcode = "S2_storeri_rr";
@@ -24363,16 +24444,16 @@ def S4_storerinew_ur : HInst<
(outs),
(ins IntRegs:$Ru32, u2_0Imm:$Ii, u32_0Imm:$II, IntRegs:$Nt8),
"memw($Ru32<<#$Ii+#$II) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeST>, Enc_10076500, AddrModeRel {
+tc_210b2456, TypeST>, Enc_7eb485, AddrModeRel {
let Inst{7-7} = 0b1;
let Inst{12-11} = 0b10;
let Inst{31-21} = 0b10101101101;
let addrMode = BaseLongOffset;
let accessSize = WordAccess;
let isNVStore = 1;
+let isNewValue = 1;
let isExtended = 1;
let mayStore = 1;
-let isNewValue = 1;
let CextOpcode = "S2_storeri";
let BaseOpcode = "S2_storeri_ur";
let DecoderNamespace = "MustExtend";
@@ -24387,7 +24468,7 @@ def S4_subaddi : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, s32_0Imm:$Ii, IntRegs:$Ru32),
"$Rd32 = add($Rs32,sub(#$Ii,$Ru32))",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_6495334 {
+tc_090485bb, TypeALU64>, Enc_8b8d61 {
let Inst{31-23} = 0b110110111;
let hasNewValue = 1;
let opNewValue = 0;
@@ -24402,7 +24483,7 @@ def S4_subi_asl_ri : HInst<
(outs IntRegs:$Rx32),
(ins u32_0Imm:$Ii, IntRegs:$Rx32in, u5_0Imm:$II),
"$Rx32 = sub(#$Ii,asl($Rx32in,#$II))",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_117962 {
+tc_c0cd91a8, TypeALU64>, Enc_c31910 {
let Inst{2-0} = 0b110;
let Inst{4-4} = 0b0;
let Inst{31-24} = 0b11011110;
@@ -24420,7 +24501,7 @@ def S4_subi_lsr_ri : HInst<
(outs IntRegs:$Rx32),
(ins u32_0Imm:$Ii, IntRegs:$Rx32in, u5_0Imm:$II),
"$Rx32 = sub(#$Ii,lsr($Rx32in,#$II))",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_117962 {
+tc_c0cd91a8, TypeALU64>, Enc_c31910 {
let Inst{2-0} = 0b110;
let Inst{4-4} = 0b1;
let Inst{31-24} = 0b11011110;
@@ -24438,7 +24519,7 @@ def S4_vrcrotate : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, IntRegs:$Rt32, u2_0Imm:$Ii),
"$Rdd32 = vrcrotate($Rss32,$Rt32,#$Ii)",
-S_3op_tc_3x_SLOT23, TypeS_3op>, Enc_114098 {
+tc_6264c5e0, TypeS_3op>, Enc_645d54 {
let Inst{7-6} = 0b11;
let Inst{31-21} = 0b11000011110;
let prefersSlot3 = 1;
@@ -24447,7 +24528,7 @@ def S4_vrcrotate_acc : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Rt32, u2_0Imm:$Ii),
"$Rxx32 += vrcrotate($Rss32,$Rt32,#$Ii)",
-S_3op_tc_3x_SLOT23, TypeS_3op>, Enc_13114546 {
+tc_bc5561d8, TypeS_3op>, Enc_b72622 {
let Inst{7-6} = 0b00;
let Inst{31-21} = 0b11001011101;
let prefersSlot3 = 1;
@@ -24457,17 +24538,18 @@ def S4_vxaddsubh : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vxaddsubh($Rss32,$Rtt32):sat",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_8333157 {
+tc_47ab9233, TypeS_3op>, Enc_a56825 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000001010;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def S4_vxaddsubhr : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vxaddsubh($Rss32,$Rtt32):rnd:>>1:sat",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_8333157 {
+tc_63cd9d2d, TypeS_3op>, Enc_a56825 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000001110;
@@ -24478,27 +24560,29 @@ def S4_vxaddsubw : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vxaddsubw($Rss32,$Rtt32):sat",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_8333157 {
+tc_47ab9233, TypeS_3op>, Enc_a56825 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000001010;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def S4_vxsubaddh : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vxsubaddh($Rss32,$Rtt32):sat",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_8333157 {
+tc_47ab9233, TypeS_3op>, Enc_a56825 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000001010;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def S4_vxsubaddhr : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vxsubaddh($Rss32,$Rtt32):rnd:>>1:sat",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_8333157 {
+tc_63cd9d2d, TypeS_3op>, Enc_a56825 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000001110;
@@ -24509,17 +24593,18 @@ def S4_vxsubaddw : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vxsubaddw($Rss32,$Rtt32):sat",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_8333157 {
+tc_47ab9233, TypeS_3op>, Enc_a56825 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000001010;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def S5_asrhub_rnd_sat : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32, u4_0Imm:$Ii),
"$Rd32 = vasrhub($Rss32,#$Ii):raw",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_8038806, Requires<[HasV5T]> {
+tc_63cd9d2d, TypeS_2op>, Enc_11a146, Requires<[HasV5T]> {
let Inst{7-5} = 0b100;
let Inst{13-12} = 0b00;
let Inst{31-21} = 0b10001000011;
@@ -24532,7 +24617,7 @@ def S5_asrhub_rnd_sat_goodsyntax : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32, u4_0Imm:$Ii),
"$Rd32 = vasrhub($Rss32,#$Ii):rnd:sat",
-S_2op_tc_2_SLOT23, TypeS_2op>, Requires<[HasV5T]> {
+tc_63cd9d2d, TypeS_2op>, Requires<[HasV5T]> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -24541,7 +24626,7 @@ def S5_asrhub_sat : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32, u4_0Imm:$Ii),
"$Rd32 = vasrhub($Rss32,#$Ii):sat",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_8038806, Requires<[HasV5T]> {
+tc_63cd9d2d, TypeS_2op>, Enc_11a146, Requires<[HasV5T]> {
let Inst{7-5} = 0b101;
let Inst{13-12} = 0b00;
let Inst{31-21} = 0b10001000011;
@@ -24554,7 +24639,7 @@ def S5_popcountp : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32),
"$Rd32 = popcount($Rss32)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_3742184, Requires<[HasV5T]> {
+tc_ca280e8b, TypeS_2op>, Enc_90cd8b, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000011;
let Inst{31-21} = 0b10001000011;
let hasNewValue = 1;
@@ -24565,7 +24650,7 @@ def S5_vasrhrnd : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, u4_0Imm:$Ii),
"$Rdd32 = vasrh($Rss32,#$Ii):raw",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_2082775, Requires<[HasV5T]> {
+tc_63cd9d2d, TypeS_2op>, Enc_12b6e9, Requires<[HasV5T]> {
let Inst{7-5} = 0b000;
let Inst{13-12} = 0b00;
let Inst{31-21} = 0b10000000001;
@@ -24575,14 +24660,14 @@ def S5_vasrhrnd_goodsyntax : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, u4_0Imm:$Ii),
"$Rdd32 = vasrh($Rss32,#$Ii):rnd",
-S_2op_tc_1_SLOT23, TypeS_2op>, Requires<[HasV5T]> {
+tc_63cd9d2d, TypeS_2op>, Requires<[HasV5T]> {
let isPseudo = 1;
}
def S6_rol_i_p : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, u6_0Imm:$Ii),
"$Rdd32 = rol($Rss32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_4231995, Requires<[HasV60T]> {
+tc_9f518242, TypeS_2op>, Enc_5eac98, Requires<[HasV60T]> {
let Inst{7-5} = 0b011;
let Inst{31-21} = 0b10000000000;
}
@@ -24590,7 +24675,7 @@ def S6_rol_i_p_acc : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, u6_0Imm:$Ii),
"$Rxx32 += rol($Rss32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_8497723, Requires<[HasV60T]> {
+tc_e17ce9ad, TypeS_2op>, Enc_70fb07, Requires<[HasV60T]> {
let Inst{7-5} = 0b111;
let Inst{31-21} = 0b10000010000;
let prefersSlot3 = 1;
@@ -24600,7 +24685,7 @@ def S6_rol_i_p_and : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, u6_0Imm:$Ii),
"$Rxx32 &= rol($Rss32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_8497723, Requires<[HasV60T]> {
+tc_e17ce9ad, TypeS_2op>, Enc_70fb07, Requires<[HasV60T]> {
let Inst{7-5} = 0b011;
let Inst{31-21} = 0b10000010010;
let prefersSlot3 = 1;
@@ -24610,7 +24695,7 @@ def S6_rol_i_p_nac : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, u6_0Imm:$Ii),
"$Rxx32 -= rol($Rss32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_8497723, Requires<[HasV60T]> {
+tc_e17ce9ad, TypeS_2op>, Enc_70fb07, Requires<[HasV60T]> {
let Inst{7-5} = 0b011;
let Inst{31-21} = 0b10000010000;
let prefersSlot3 = 1;
@@ -24620,7 +24705,7 @@ def S6_rol_i_p_or : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, u6_0Imm:$Ii),
"$Rxx32 |= rol($Rss32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_8497723, Requires<[HasV60T]> {
+tc_e17ce9ad, TypeS_2op>, Enc_70fb07, Requires<[HasV60T]> {
let Inst{7-5} = 0b111;
let Inst{31-21} = 0b10000010010;
let prefersSlot3 = 1;
@@ -24630,7 +24715,7 @@ def S6_rol_i_p_xacc : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, u6_0Imm:$Ii),
"$Rxx32 ^= rol($Rss32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_8497723, Requires<[HasV60T]> {
+tc_e17ce9ad, TypeS_2op>, Enc_70fb07, Requires<[HasV60T]> {
let Inst{7-5} = 0b011;
let Inst{31-21} = 0b10000010100;
let prefersSlot3 = 1;
@@ -24640,7 +24725,7 @@ def S6_rol_i_r : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rd32 = rol($Rs32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_2771456, Requires<[HasV60T]> {
+tc_9f518242, TypeS_2op>, Enc_a05677, Requires<[HasV60T]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001100000;
@@ -24651,7 +24736,7 @@ def S6_rol_i_r_acc : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rx32 += rol($Rs32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_2410156, Requires<[HasV60T]> {
+tc_e17ce9ad, TypeS_2op>, Enc_28a2dc, Requires<[HasV60T]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001110000;
@@ -24664,7 +24749,7 @@ def S6_rol_i_r_and : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rx32 &= rol($Rs32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_2410156, Requires<[HasV60T]> {
+tc_e17ce9ad, TypeS_2op>, Enc_28a2dc, Requires<[HasV60T]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001110010;
@@ -24677,7 +24762,7 @@ def S6_rol_i_r_nac : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rx32 -= rol($Rs32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_2410156, Requires<[HasV60T]> {
+tc_e17ce9ad, TypeS_2op>, Enc_28a2dc, Requires<[HasV60T]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001110000;
@@ -24690,7 +24775,7 @@ def S6_rol_i_r_or : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rx32 |= rol($Rs32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_2410156, Requires<[HasV60T]> {
+tc_e17ce9ad, TypeS_2op>, Enc_28a2dc, Requires<[HasV60T]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001110010;
@@ -24703,7 +24788,7 @@ def S6_rol_i_r_xacc : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rx32 ^= rol($Rs32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_2410156, Requires<[HasV60T]> {
+tc_e17ce9ad, TypeS_2op>, Enc_28a2dc, Requires<[HasV60T]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001110100;
@@ -24716,7 +24801,7 @@ def S6_vsplatrbp : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32),
"$Rdd32 = vsplatb($Rs32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_4030179, Requires<[HasV62T]> {
+tc_78b3c689, TypeS_2op>, Enc_3a3d62, Requires<[HasV62T]> {
let Inst{13-5} = 0b000000100;
let Inst{31-21} = 0b10000100010;
}
@@ -24724,7 +24809,7 @@ def S6_vtrunehb_ppp : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vtrunehb($Rss32,$Rtt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_8333157, Requires<[HasV62T]> {
+tc_9f518242, TypeS_3op>, Enc_a56825, Requires<[HasV62T]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000001100;
@@ -24733,7 +24818,7 @@ def S6_vtrunohb_ppp : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vtrunohb($Rss32,$Rtt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_8333157, Requires<[HasV62T]> {
+tc_9f518242, TypeS_3op>, Enc_a56825, Requires<[HasV62T]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000001100;
@@ -24742,7 +24827,7 @@ def SA1_addi : HInst<
(outs GeneralSubRegs:$Rx16),
(ins IntRegs:$Rx16in, s32_0Imm:$Ii),
"$Rx16 = add($Rx16in,#$Ii)",
-PSEUDO, TypeSUBINSN>, Enc_3974695 {
+tc_821c4233, TypeSUBINSN>, Enc_93af4c {
let Inst{12-11} = 0b00;
let hasNewValue = 1;
let opNewValue = 0;
@@ -24759,7 +24844,7 @@ def SA1_addrx : HInst<
(outs GeneralSubRegs:$Rx16),
(ins IntRegs:$Rx16in, GeneralSubRegs:$Rs16),
"$Rx16 = add($Rx16in,$Rs16)",
-PSEUDO, TypeSUBINSN>, Enc_6135183 {
+tc_821c4233, TypeSUBINSN>, Enc_0527db {
let Inst{12-8} = 0b11000;
let hasNewValue = 1;
let opNewValue = 0;
@@ -24771,7 +24856,7 @@ def SA1_addsp : HInst<
(outs GeneralSubRegs:$Rd16),
(ins u6_2Imm:$Ii),
"$Rd16 = add(r29,#$Ii)",
-PSEUDO, TypeSUBINSN>, Enc_176263 {
+tc_d2609065, TypeSUBINSN>, Enc_2df31d {
let Inst{12-10} = 0b011;
let hasNewValue = 1;
let opNewValue = 0;
@@ -24783,7 +24868,7 @@ def SA1_and1 : HInst<
(outs GeneralSubRegs:$Rd16),
(ins GeneralSubRegs:$Rs16),
"$Rd16 = and($Rs16,#1)",
-PSEUDO, TypeSUBINSN>, Enc_14939491 {
+tc_d2609065, TypeSUBINSN>, Enc_97d666 {
let Inst{12-8} = 0b10010;
let hasNewValue = 1;
let opNewValue = 0;
@@ -24794,7 +24879,7 @@ def SA1_clrf : HInst<
(outs GeneralSubRegs:$Rd16),
(ins),
"if (!p0) $Rd16 = #0",
-PSEUDO, TypeSUBINSN>, Enc_1451363 {
+tc_7c2dcd4d, TypeSUBINSN>, Enc_1f5ba6 {
let Inst{12-4} = 0b110100111;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -24808,7 +24893,7 @@ def SA1_clrfnew : HInst<
(outs GeneralSubRegs:$Rd16),
(ins),
"if (!p0.new) $Rd16 = #0",
-PSEUDO, TypeSUBINSN>, Enc_1451363 {
+tc_f26aa619, TypeSUBINSN>, Enc_1f5ba6 {
let Inst{12-4} = 0b110100101;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -24823,7 +24908,7 @@ def SA1_clrt : HInst<
(outs GeneralSubRegs:$Rd16),
(ins),
"if (p0) $Rd16 = #0",
-PSEUDO, TypeSUBINSN>, Enc_1451363 {
+tc_7c2dcd4d, TypeSUBINSN>, Enc_1f5ba6 {
let Inst{12-4} = 0b110100110;
let isPredicated = 1;
let hasNewValue = 1;
@@ -24836,7 +24921,7 @@ def SA1_clrtnew : HInst<
(outs GeneralSubRegs:$Rd16),
(ins),
"if (p0.new) $Rd16 = #0",
-PSEUDO, TypeSUBINSN>, Enc_1451363 {
+tc_f26aa619, TypeSUBINSN>, Enc_1f5ba6 {
let Inst{12-4} = 0b110100100;
let isPredicated = 1;
let hasNewValue = 1;
@@ -24850,7 +24935,7 @@ def SA1_cmpeqi : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u2_0Imm:$Ii),
"p0 = cmp.eq($Rs16,#$Ii)",
-PSEUDO, TypeSUBINSN>, Enc_2079016 {
+tc_e8c7a357, TypeSUBINSN>, Enc_63eaeb {
let Inst{3-2} = 0b00;
let Inst{12-8} = 0b11001;
let AsmVariantName = "NonParsable";
@@ -24861,7 +24946,7 @@ def SA1_combine0i : HInst<
(outs GeneralDoubleLow8Regs:$Rdd8),
(ins u2_0Imm:$Ii),
"$Rdd8 = combine(#0,#$Ii)",
-PSEUDO, TypeSUBINSN>, Enc_15946706 {
+tc_d2609065, TypeSUBINSN>, Enc_ed48be {
let Inst{4-3} = 0b00;
let Inst{12-7} = 0b111000;
let hasNewValue = 1;
@@ -24873,7 +24958,7 @@ def SA1_combine1i : HInst<
(outs GeneralDoubleLow8Regs:$Rdd8),
(ins u2_0Imm:$Ii),
"$Rdd8 = combine(#1,#$Ii)",
-PSEUDO, TypeSUBINSN>, Enc_15946706 {
+tc_d2609065, TypeSUBINSN>, Enc_ed48be {
let Inst{4-3} = 0b01;
let Inst{12-7} = 0b111000;
let hasNewValue = 1;
@@ -24885,7 +24970,7 @@ def SA1_combine2i : HInst<
(outs GeneralDoubleLow8Regs:$Rdd8),
(ins u2_0Imm:$Ii),
"$Rdd8 = combine(#2,#$Ii)",
-PSEUDO, TypeSUBINSN>, Enc_15946706 {
+tc_d2609065, TypeSUBINSN>, Enc_ed48be {
let Inst{4-3} = 0b10;
let Inst{12-7} = 0b111000;
let hasNewValue = 1;
@@ -24897,7 +24982,7 @@ def SA1_combine3i : HInst<
(outs GeneralDoubleLow8Regs:$Rdd8),
(ins u2_0Imm:$Ii),
"$Rdd8 = combine(#3,#$Ii)",
-PSEUDO, TypeSUBINSN>, Enc_15946706 {
+tc_d2609065, TypeSUBINSN>, Enc_ed48be {
let Inst{4-3} = 0b11;
let Inst{12-7} = 0b111000;
let hasNewValue = 1;
@@ -24909,7 +24994,7 @@ def SA1_combinerz : HInst<
(outs GeneralDoubleLow8Regs:$Rdd8),
(ins GeneralSubRegs:$Rs16),
"$Rdd8 = combine($Rs16,#0)",
-PSEUDO, TypeSUBINSN>, Enc_10501894 {
+tc_d2609065, TypeSUBINSN>, Enc_399e12 {
let Inst{3-3} = 0b1;
let Inst{12-8} = 0b11101;
let hasNewValue = 1;
@@ -24921,7 +25006,7 @@ def SA1_combinezr : HInst<
(outs GeneralDoubleLow8Regs:$Rdd8),
(ins GeneralSubRegs:$Rs16),
"$Rdd8 = combine(#0,$Rs16)",
-PSEUDO, TypeSUBINSN>, Enc_10501894 {
+tc_d2609065, TypeSUBINSN>, Enc_399e12 {
let Inst{3-3} = 0b0;
let Inst{12-8} = 0b11101;
let hasNewValue = 1;
@@ -24933,7 +25018,7 @@ def SA1_dec : HInst<
(outs GeneralSubRegs:$Rd16),
(ins GeneralSubRegs:$Rs16, n1Const:$n1),
"$Rd16 = add($Rs16,#$n1)",
-PSEUDO, TypeSUBINSN>, Enc_10597934 {
+tc_821c4233, TypeSUBINSN>, Enc_ee5ed0 {
let Inst{12-8} = 0b10011;
let hasNewValue = 1;
let opNewValue = 0;
@@ -24944,7 +25029,7 @@ def SA1_inc : HInst<
(outs GeneralSubRegs:$Rd16),
(ins GeneralSubRegs:$Rs16),
"$Rd16 = add($Rs16,#1)",
-PSEUDO, TypeSUBINSN>, Enc_14939491 {
+tc_d2609065, TypeSUBINSN>, Enc_97d666 {
let Inst{12-8} = 0b10001;
let hasNewValue = 1;
let opNewValue = 0;
@@ -24955,7 +25040,7 @@ def SA1_seti : HInst<
(outs GeneralSubRegs:$Rd16),
(ins u32_0Imm:$Ii),
"$Rd16 = #$Ii",
-PSEUDO, TypeSUBINSN>, Enc_2176383 {
+tc_d2609065, TypeSUBINSN>, Enc_e39bb2 {
let Inst{12-10} = 0b010;
let hasNewValue = 1;
let opNewValue = 0;
@@ -24971,7 +25056,7 @@ def SA1_setin1 : HInst<
(outs GeneralSubRegs:$Rd16),
(ins n1Const:$n1),
"$Rd16 = #$n1",
-PSEUDO, TypeSUBINSN>, Enc_13336212 {
+tc_d2609065, TypeSUBINSN>, Enc_7a0ea6 {
let Inst{12-4} = 0b110100000;
let hasNewValue = 1;
let opNewValue = 0;
@@ -24982,7 +25067,7 @@ def SA1_sxtb : HInst<
(outs GeneralSubRegs:$Rd16),
(ins GeneralSubRegs:$Rs16),
"$Rd16 = sxtb($Rs16)",
-PSEUDO, TypeSUBINSN>, Enc_14939491 {
+tc_d2609065, TypeSUBINSN>, Enc_97d666 {
let Inst{12-8} = 0b10101;
let hasNewValue = 1;
let opNewValue = 0;
@@ -24993,7 +25078,7 @@ def SA1_sxth : HInst<
(outs GeneralSubRegs:$Rd16),
(ins GeneralSubRegs:$Rs16),
"$Rd16 = sxth($Rs16)",
-PSEUDO, TypeSUBINSN>, Enc_14939491 {
+tc_d2609065, TypeSUBINSN>, Enc_97d666 {
let Inst{12-8} = 0b10100;
let hasNewValue = 1;
let opNewValue = 0;
@@ -25004,7 +25089,7 @@ def SA1_tfr : HInst<
(outs GeneralSubRegs:$Rd16),
(ins GeneralSubRegs:$Rs16),
"$Rd16 = $Rs16",
-PSEUDO, TypeSUBINSN>, Enc_14939491 {
+tc_d2609065, TypeSUBINSN>, Enc_97d666 {
let Inst{12-8} = 0b10000;
let hasNewValue = 1;
let opNewValue = 0;
@@ -25015,7 +25100,7 @@ def SA1_zxtb : HInst<
(outs GeneralSubRegs:$Rd16),
(ins GeneralSubRegs:$Rs16),
"$Rd16 = and($Rs16,#255)",
-PSEUDO, TypeSUBINSN>, Enc_14939491 {
+tc_d2609065, TypeSUBINSN>, Enc_97d666 {
let Inst{12-8} = 0b10111;
let hasNewValue = 1;
let opNewValue = 0;
@@ -25026,7 +25111,7 @@ def SA1_zxth : HInst<
(outs GeneralSubRegs:$Rd16),
(ins GeneralSubRegs:$Rs16),
"$Rd16 = zxth($Rs16)",
-PSEUDO, TypeSUBINSN>, Enc_14939491 {
+tc_d2609065, TypeSUBINSN>, Enc_97d666 {
let Inst{12-8} = 0b10110;
let hasNewValue = 1;
let opNewValue = 0;
@@ -25037,7 +25122,7 @@ def SL1_loadri_io : HInst<
(outs GeneralSubRegs:$Rd16),
(ins GeneralSubRegs:$Rs16, u4_2Imm:$Ii),
"$Rd16 = memw($Rs16+#$Ii)",
-PSEUDO, TypeSUBINSN>, Enc_13606251 {
+tc_bf6fa601, TypeSUBINSN>, Enc_53dca9 {
let Inst{12-12} = 0b0;
let hasNewValue = 1;
let opNewValue = 0;
@@ -25051,7 +25136,7 @@ def SL1_loadrub_io : HInst<
(outs GeneralSubRegs:$Rd16),
(ins GeneralSubRegs:$Rs16, u4_0Imm:$Ii),
"$Rd16 = memub($Rs16+#$Ii)",
-PSEUDO, TypeSUBINSN>, Enc_15606259 {
+tc_bf6fa601, TypeSUBINSN>, Enc_c175d0 {
let Inst{12-12} = 0b1;
let hasNewValue = 1;
let opNewValue = 0;
@@ -25065,7 +25150,7 @@ def SL2_deallocframe : HInst<
(outs),
(ins),
"deallocframe",
-PSEUDO, TypeSUBINSN>, Enc_0 {
+tc_86442910, TypeSUBINSN>, Enc_e3b0c4 {
let Inst{12-0} = 0b1111100000000;
let accessSize = DoubleWordAccess;
let AsmVariantName = "NonParsable";
@@ -25078,7 +25163,7 @@ def SL2_jumpr31 : HInst<
(outs),
(ins),
"jumpr r31",
-PSEUDO, TypeSUBINSN>, Enc_0 {
+tc_35fb9d13, TypeSUBINSN>, Enc_e3b0c4 {
let Inst{12-0} = 0b1111111000000;
let isTerminator = 1;
let isIndirectBranch = 1;
@@ -25093,7 +25178,7 @@ def SL2_jumpr31_f : HInst<
(outs),
(ins),
"if (!p0) jumpr r31",
-PSEUDO, TypeSUBINSN>, Enc_0 {
+tc_35fb9d13, TypeSUBINSN>, Enc_e3b0c4 {
let Inst{12-0} = 0b1111111000101;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -25111,7 +25196,7 @@ def SL2_jumpr31_fnew : HInst<
(outs),
(ins),
"if (!p0.new) jumpr:nt r31",
-PSEUDO, TypeSUBINSN>, Enc_0 {
+tc_35fb9d13, TypeSUBINSN>, Enc_e3b0c4 {
let Inst{12-0} = 0b1111111000111;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -25119,8 +25204,8 @@ let isTerminator = 1;
let isIndirectBranch = 1;
let cofMax1 = 1;
let AsmVariantName = "NonParsable";
-let isReturn = 1;
let isPredicatedNew = 1;
+let isReturn = 1;
let Uses = [P0, R31];
let Defs = [PC];
let isTaken = Inst{4};
@@ -25130,7 +25215,7 @@ def SL2_jumpr31_t : HInst<
(outs),
(ins),
"if (p0) jumpr r31",
-PSEUDO, TypeSUBINSN>, Enc_0 {
+tc_35fb9d13, TypeSUBINSN>, Enc_e3b0c4 {
let Inst{12-0} = 0b1111111000100;
let isPredicated = 1;
let isTerminator = 1;
@@ -25147,15 +25232,15 @@ def SL2_jumpr31_tnew : HInst<
(outs),
(ins),
"if (p0.new) jumpr:nt r31",
-PSEUDO, TypeSUBINSN>, Enc_0 {
+tc_35fb9d13, TypeSUBINSN>, Enc_e3b0c4 {
let Inst{12-0} = 0b1111111000110;
let isPredicated = 1;
let isTerminator = 1;
let isIndirectBranch = 1;
let cofMax1 = 1;
let AsmVariantName = "NonParsable";
-let isReturn = 1;
let isPredicatedNew = 1;
+let isReturn = 1;
let Uses = [P0, R31];
let Defs = [PC];
let isTaken = Inst{4};
@@ -25165,7 +25250,7 @@ def SL2_loadrb_io : HInst<
(outs GeneralSubRegs:$Rd16),
(ins GeneralSubRegs:$Rs16, u3_0Imm:$Ii),
"$Rd16 = memb($Rs16+#$Ii)",
-PSEUDO, TypeSUBINSN>, Enc_3135259 {
+tc_bf6fa601, TypeSUBINSN>, Enc_2fbf3c {
let Inst{12-11} = 0b10;
let hasNewValue = 1;
let opNewValue = 0;
@@ -25179,7 +25264,7 @@ def SL2_loadrd_sp : HInst<
(outs GeneralDoubleLow8Regs:$Rdd8),
(ins u5_3Imm:$Ii),
"$Rdd8 = memd(r29+#$Ii)",
-PSEUDO, TypeSUBINSN>, Enc_16479122 {
+tc_70cabf66, TypeSUBINSN>, Enc_86a14b {
let Inst{12-8} = 0b11110;
let hasNewValue = 1;
let opNewValue = 0;
@@ -25194,7 +25279,7 @@ def SL2_loadrh_io : HInst<
(outs GeneralSubRegs:$Rd16),
(ins GeneralSubRegs:$Rs16, u3_1Imm:$Ii),
"$Rd16 = memh($Rs16+#$Ii)",
-PSEUDO, TypeSUBINSN>, Enc_4135257 {
+tc_bf6fa601, TypeSUBINSN>, Enc_2bae10 {
let Inst{12-11} = 0b00;
let hasNewValue = 1;
let opNewValue = 0;
@@ -25208,7 +25293,7 @@ def SL2_loadri_sp : HInst<
(outs GeneralSubRegs:$Rd16),
(ins u5_2Imm:$Ii),
"$Rd16 = memw(r29+#$Ii)",
-PSEUDO, TypeSUBINSN>, Enc_64199 {
+tc_70cabf66, TypeSUBINSN>, Enc_51635c {
let Inst{12-9} = 0b1110;
let hasNewValue = 1;
let opNewValue = 0;
@@ -25223,7 +25308,7 @@ def SL2_loadruh_io : HInst<
(outs GeneralSubRegs:$Rd16),
(ins GeneralSubRegs:$Rs16, u3_1Imm:$Ii),
"$Rd16 = memuh($Rs16+#$Ii)",
-PSEUDO, TypeSUBINSN>, Enc_4135257 {
+tc_bf6fa601, TypeSUBINSN>, Enc_2bae10 {
let Inst{12-11} = 0b01;
let hasNewValue = 1;
let opNewValue = 0;
@@ -25237,15 +25322,15 @@ def SL2_return : HInst<
(outs),
(ins),
"dealloc_return",
-PSEUDO, TypeSUBINSN>, Enc_0 {
+tc_95c54f8b, TypeSUBINSN>, Enc_e3b0c4 {
let Inst{12-0} = 0b1111101000000;
let isTerminator = 1;
let isIndirectBranch = 1;
let accessSize = DoubleWordAccess;
let cofMax1 = 1;
let AsmVariantName = "NonParsable";
-let isReturn = 1;
let mayLoad = 1;
+let isReturn = 1;
let Uses = [R30];
let Defs = [PC, R30, R29, R31];
let DecoderNamespace = "SUBINSN_L2";
@@ -25254,7 +25339,7 @@ def SL2_return_f : HInst<
(outs),
(ins),
"if (!p0) dealloc_return",
-PSEUDO, TypeSUBINSN>, Enc_0 {
+tc_95c54f8b, TypeSUBINSN>, Enc_e3b0c4 {
let Inst{12-0} = 0b1111101000101;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -25263,8 +25348,8 @@ let isIndirectBranch = 1;
let accessSize = DoubleWordAccess;
let cofMax1 = 1;
let AsmVariantName = "NonParsable";
-let isReturn = 1;
let mayLoad = 1;
+let isReturn = 1;
let Uses = [P0, R30];
let Defs = [PC, R30, R29, R31];
let isTaken = Inst{4};
@@ -25274,7 +25359,7 @@ def SL2_return_fnew : HInst<
(outs),
(ins),
"if (!p0.new) dealloc_return:nt",
-PSEUDO, TypeSUBINSN>, Enc_0 {
+tc_95c54f8b, TypeSUBINSN>, Enc_e3b0c4 {
let Inst{12-0} = 0b1111101000111;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -25283,9 +25368,9 @@ let isIndirectBranch = 1;
let accessSize = DoubleWordAccess;
let cofMax1 = 1;
let AsmVariantName = "NonParsable";
-let isReturn = 1;
let isPredicatedNew = 1;
let mayLoad = 1;
+let isReturn = 1;
let Uses = [P0, R30];
let Defs = [PC, R30, R29, R31];
let isTaken = Inst{4};
@@ -25295,7 +25380,7 @@ def SL2_return_t : HInst<
(outs),
(ins),
"if (p0) dealloc_return",
-PSEUDO, TypeSUBINSN>, Enc_0 {
+tc_95c54f8b, TypeSUBINSN>, Enc_e3b0c4 {
let Inst{12-0} = 0b1111101000100;
let isPredicated = 1;
let isTerminator = 1;
@@ -25303,8 +25388,8 @@ let isIndirectBranch = 1;
let accessSize = DoubleWordAccess;
let cofMax1 = 1;
let AsmVariantName = "NonParsable";
-let isReturn = 1;
let mayLoad = 1;
+let isReturn = 1;
let Uses = [P0, R30];
let Defs = [PC, R30, R29, R31];
let isTaken = Inst{4};
@@ -25314,7 +25399,7 @@ def SL2_return_tnew : HInst<
(outs),
(ins),
"if (p0.new) dealloc_return:nt",
-PSEUDO, TypeSUBINSN>, Enc_0 {
+tc_95c54f8b, TypeSUBINSN>, Enc_e3b0c4 {
let Inst{12-0} = 0b1111101000110;
let isPredicated = 1;
let isTerminator = 1;
@@ -25322,9 +25407,9 @@ let isIndirectBranch = 1;
let accessSize = DoubleWordAccess;
let cofMax1 = 1;
let AsmVariantName = "NonParsable";
-let isReturn = 1;
let isPredicatedNew = 1;
let mayLoad = 1;
+let isReturn = 1;
let Uses = [P0, R30];
let Defs = [PC, R30, R29, R31];
let isTaken = Inst{4};
@@ -25334,7 +25419,7 @@ def SS1_storeb_io : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u4_0Imm:$Ii, GeneralSubRegs:$Rt16),
"memb($Rs16+#$Ii) = $Rt16",
-PSEUDO, TypeSUBINSN>, Enc_13204995 {
+tc_53ee6546, TypeSUBINSN>, Enc_b38ffc {
let Inst{12-12} = 0b1;
let addrMode = BaseImmOffset;
let accessSize = ByteAccess;
@@ -25346,7 +25431,7 @@ def SS1_storew_io : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u4_2Imm:$Ii, GeneralSubRegs:$Rt16),
"memw($Rs16+#$Ii) = $Rt16",
-PSEUDO, TypeSUBINSN>, Enc_11205051 {
+tc_53ee6546, TypeSUBINSN>, Enc_f55a0c {
let Inst{12-12} = 0b0;
let addrMode = BaseImmOffset;
let accessSize = WordAccess;
@@ -25358,7 +25443,7 @@ def SS2_allocframe : HInst<
(outs),
(ins u5_3Imm:$Ii),
"allocframe(#$Ii)",
-PSEUDO, TypeSUBINSN>, Enc_7884306 {
+tc_f027ebe9, TypeSUBINSN>, Enc_6f70ca {
let Inst{3-0} = 0b0000;
let Inst{12-9} = 0b1110;
let addrMode = BaseImmOffset;
@@ -25373,7 +25458,7 @@ def SS2_storebi0 : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u4_0Imm:$Ii),
"memb($Rs16+#$Ii) = #0",
-PSEUDO, TypeSUBINSN>, Enc_13536408 {
+tc_6c52d277, TypeSUBINSN>, Enc_84d359 {
let Inst{12-8} = 0b10010;
let addrMode = BaseImmOffset;
let accessSize = ByteAccess;
@@ -25385,7 +25470,7 @@ def SS2_storebi1 : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u4_0Imm:$Ii),
"memb($Rs16+#$Ii) = #1",
-PSEUDO, TypeSUBINSN>, Enc_13536408 {
+tc_6c52d277, TypeSUBINSN>, Enc_84d359 {
let Inst{12-8} = 0b10011;
let addrMode = BaseImmOffset;
let accessSize = ByteAccess;
@@ -25397,7 +25482,7 @@ def SS2_stored_sp : HInst<
(outs),
(ins s6_3Imm:$Ii, GeneralDoubleLow8Regs:$Rtt8),
"memd(r29+#$Ii) = $Rtt8",
-PSEUDO, TypeSUBINSN>, Enc_9165078 {
+tc_c14739d5, TypeSUBINSN>, Enc_b8309d {
let Inst{12-9} = 0b0101;
let addrMode = BaseImmOffset;
let accessSize = DoubleWordAccess;
@@ -25410,7 +25495,7 @@ def SS2_storeh_io : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u3_1Imm:$Ii, GeneralSubRegs:$Rt16),
"memh($Rs16+#$Ii) = $Rt16",
-PSEUDO, TypeSUBINSN>, Enc_1734121 {
+tc_53ee6546, TypeSUBINSN>, Enc_625deb {
let Inst{12-11} = 0b00;
let addrMode = BaseImmOffset;
let accessSize = HalfWordAccess;
@@ -25422,7 +25507,7 @@ def SS2_storew_sp : HInst<
(outs),
(ins u5_2Imm:$Ii, GeneralSubRegs:$Rt16),
"memw(r29+#$Ii) = $Rt16",
-PSEUDO, TypeSUBINSN>, Enc_6690615 {
+tc_c14739d5, TypeSUBINSN>, Enc_87c142 {
let Inst{12-9} = 0b0100;
let addrMode = BaseImmOffset;
let accessSize = WordAccess;
@@ -25435,7 +25520,7 @@ def SS2_storewi0 : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u4_2Imm:$Ii),
"memw($Rs16+#$Ii) = #0",
-PSEUDO, TypeSUBINSN>, Enc_15536400 {
+tc_6c52d277, TypeSUBINSN>, Enc_a6ce9c {
let Inst{12-8} = 0b10000;
let addrMode = BaseImmOffset;
let accessSize = WordAccess;
@@ -25447,7 +25532,7 @@ def SS2_storewi1 : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u4_2Imm:$Ii),
"memw($Rs16+#$Ii) = #1",
-PSEUDO, TypeSUBINSN>, Enc_15536400 {
+tc_6c52d277, TypeSUBINSN>, Enc_a6ce9c {
let Inst{12-8} = 0b10001;
let addrMode = BaseImmOffset;
let accessSize = WordAccess;
@@ -25759,7 +25844,7 @@ def V6_extractw : HInst<
(outs IntRegs:$Rd32),
(ins VectorRegs:$Vu32, IntRegs:$Rs32),
"$Rd32 = vextract($Vu32,$Rs32)",
-LD_tc_ld_SLOT0, TypeLD>, Enc_16601956, Requires<[HasV60T,UseHVX]> {
+tc_9777e6bf, TypeLD>, Enc_50e578, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10010010000;
@@ -25773,7 +25858,7 @@ def V6_extractw_128B : HInst<
(outs IntRegs:$Rd32),
(ins VectorRegs128B:$Vu32, IntRegs:$Rs32),
"$Rd32 = vextract($Vu32,$Rs32)",
-LD_tc_ld_SLOT0, TypeLD>, Enc_16601956, Requires<[HasV60T,UseHVX]> {
+tc_9777e6bf, TypeLD>, Enc_50e578, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10010010000;
@@ -25851,6 +25936,144 @@ let isCodeGenOnly = 1;
let DecoderNamespace = "EXT_mmvec";
let isCodeGenOnly = 1;
}
+def V6_ldcnp0 : HInst<
+(outs VectorRegs:$Vd32),
+(ins PredRegs:$Pv4, IntRegs:$Rt32),
+"if (!$Pv4) $Vd32.cur = vmem($Rt32)",
+PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> {
+let hasNewValue = 1;
+let opNewValue = 0;
+let isPseudo = 1;
+let isCodeGenOnly = 1;
+let DecoderNamespace = "EXT_mmvec";
+}
+def V6_ldcnp0_128B : HInst<
+(outs VectorRegs128B:$Vd32),
+(ins PredRegs:$Pv4, IntRegs:$Rt32),
+"if (!$Pv4) $Vd32.cur = vmem($Rt32)",
+PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> {
+let hasNewValue = 1;
+let opNewValue = 0;
+let isPseudo = 1;
+let isCodeGenOnly = 1;
+let DecoderNamespace = "EXT_mmvec";
+let isCodeGenOnly = 1;
+}
+def V6_ldcnpnt0 : HInst<
+(outs VectorRegs:$Vd32),
+(ins PredRegs:$Pv4, IntRegs:$Rt32),
+"if (!$Pv4) $Vd32.cur = vmem($Rt32):nt",
+PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> {
+let hasNewValue = 1;
+let opNewValue = 0;
+let isPseudo = 1;
+let isCodeGenOnly = 1;
+let DecoderNamespace = "EXT_mmvec";
+}
+def V6_ldcnpnt0_128B : HInst<
+(outs VectorRegs128B:$Vd32),
+(ins PredRegs:$Pv4, IntRegs:$Rt32),
+"if (!$Pv4) $Vd32.cur = vmem($Rt32):nt",
+PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> {
+let hasNewValue = 1;
+let opNewValue = 0;
+let isPseudo = 1;
+let isCodeGenOnly = 1;
+let DecoderNamespace = "EXT_mmvec";
+let isCodeGenOnly = 1;
+}
+def V6_ldcp0 : HInst<
+(outs VectorRegs:$Vd32),
+(ins PredRegs:$Pv4, IntRegs:$Rt32),
+"if ($Pv4) $Vd32.cur = vmem($Rt32)",
+PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> {
+let hasNewValue = 1;
+let opNewValue = 0;
+let isPseudo = 1;
+let isCodeGenOnly = 1;
+let DecoderNamespace = "EXT_mmvec";
+}
+def V6_ldcp0_128B : HInst<
+(outs VectorRegs128B:$Vd32),
+(ins PredRegs:$Pv4, IntRegs:$Rt32),
+"if ($Pv4) $Vd32.cur = vmem($Rt32)",
+PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> {
+let hasNewValue = 1;
+let opNewValue = 0;
+let isPseudo = 1;
+let isCodeGenOnly = 1;
+let DecoderNamespace = "EXT_mmvec";
+let isCodeGenOnly = 1;
+}
+def V6_ldcpnt0 : HInst<
+(outs VectorRegs:$Vd32),
+(ins PredRegs:$Pv4, IntRegs:$Rt32),
+"if ($Pv4) $Vd32.cur = vmem($Rt32):nt",
+PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> {
+let hasNewValue = 1;
+let opNewValue = 0;
+let isPseudo = 1;
+let isCodeGenOnly = 1;
+let DecoderNamespace = "EXT_mmvec";
+}
+def V6_ldcpnt0_128B : HInst<
+(outs VectorRegs128B:$Vd32),
+(ins PredRegs:$Pv4, IntRegs:$Rt32),
+"if ($Pv4) $Vd32.cur = vmem($Rt32):nt",
+PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> {
+let hasNewValue = 1;
+let opNewValue = 0;
+let isPseudo = 1;
+let isCodeGenOnly = 1;
+let DecoderNamespace = "EXT_mmvec";
+let isCodeGenOnly = 1;
+}
+def V6_ldnp0 : HInst<
+(outs VectorRegs:$Vd32),
+(ins PredRegs:$Pv4, IntRegs:$Rt32),
+"if (!$Pv4) $Vd32 = vmem($Rt32)",
+PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> {
+let hasNewValue = 1;
+let opNewValue = 0;
+let isPseudo = 1;
+let isCodeGenOnly = 1;
+let DecoderNamespace = "EXT_mmvec";
+}
+def V6_ldnp0_128B : HInst<
+(outs VectorRegs128B:$Vd32),
+(ins PredRegs:$Pv4, IntRegs:$Rt32),
+"if (!$Pv4) $Vd32 = vmem($Rt32)",
+PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> {
+let hasNewValue = 1;
+let opNewValue = 0;
+let isPseudo = 1;
+let isCodeGenOnly = 1;
+let DecoderNamespace = "EXT_mmvec";
+let isCodeGenOnly = 1;
+}
+def V6_ldnpnt0 : HInst<
+(outs VectorRegs:$Vd32),
+(ins PredRegs:$Pv4, IntRegs:$Rt32),
+"if (!$Pv4) $Vd32 = vmem($Rt32):nt",
+PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> {
+let hasNewValue = 1;
+let opNewValue = 0;
+let isPseudo = 1;
+let isCodeGenOnly = 1;
+let DecoderNamespace = "EXT_mmvec";
+}
+def V6_ldnpnt0_128B : HInst<
+(outs VectorRegs128B:$Vd32),
+(ins PredRegs:$Pv4, IntRegs:$Rt32),
+"if (!$Pv4) $Vd32 = vmem($Rt32):nt",
+PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> {
+let hasNewValue = 1;
+let opNewValue = 0;
+let isPseudo = 1;
+let isCodeGenOnly = 1;
+let DecoderNamespace = "EXT_mmvec";
+let isCodeGenOnly = 1;
+}
def V6_ldnt0 : HInst<
(outs VectorRegs:$Vd32),
(ins IntRegs:$Rt32),
@@ -25874,6 +26097,144 @@ let isCodeGenOnly = 1;
let DecoderNamespace = "EXT_mmvec";
let isCodeGenOnly = 1;
}
+def V6_ldp0 : HInst<
+(outs VectorRegs:$Vd32),
+(ins PredRegs:$Pv4, IntRegs:$Rt32),
+"if ($Pv4) $Vd32 = vmem($Rt32)",
+PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> {
+let hasNewValue = 1;
+let opNewValue = 0;
+let isPseudo = 1;
+let isCodeGenOnly = 1;
+let DecoderNamespace = "EXT_mmvec";
+}
+def V6_ldp0_128B : HInst<
+(outs VectorRegs128B:$Vd32),
+(ins PredRegs:$Pv4, IntRegs:$Rt32),
+"if ($Pv4) $Vd32 = vmem($Rt32)",
+PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> {
+let hasNewValue = 1;
+let opNewValue = 0;
+let isPseudo = 1;
+let isCodeGenOnly = 1;
+let DecoderNamespace = "EXT_mmvec";
+let isCodeGenOnly = 1;
+}
+def V6_ldpnt0 : HInst<
+(outs VectorRegs:$Vd32),
+(ins PredRegs:$Pv4, IntRegs:$Rt32),
+"if ($Pv4) $Vd32 = vmem($Rt32):nt",
+PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> {
+let hasNewValue = 1;
+let opNewValue = 0;
+let isPseudo = 1;
+let isCodeGenOnly = 1;
+let DecoderNamespace = "EXT_mmvec";
+}
+def V6_ldpnt0_128B : HInst<
+(outs VectorRegs128B:$Vd32),
+(ins PredRegs:$Pv4, IntRegs:$Rt32),
+"if ($Pv4) $Vd32 = vmem($Rt32):nt",
+PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> {
+let hasNewValue = 1;
+let opNewValue = 0;
+let isPseudo = 1;
+let isCodeGenOnly = 1;
+let DecoderNamespace = "EXT_mmvec";
+let isCodeGenOnly = 1;
+}
+def V6_ldtnp0 : HInst<
+(outs VectorRegs:$Vd32),
+(ins PredRegs:$Pv4, IntRegs:$Rt32),
+"if (!$Pv4) $Vd32.tmp = vmem($Rt32)",
+PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> {
+let hasNewValue = 1;
+let opNewValue = 0;
+let isPseudo = 1;
+let isCodeGenOnly = 1;
+let DecoderNamespace = "EXT_mmvec";
+}
+def V6_ldtnp0_128B : HInst<
+(outs VectorRegs128B:$Vd32),
+(ins PredRegs:$Pv4, IntRegs:$Rt32),
+"if (!$Pv4) $Vd32.tmp = vmem($Rt32)",
+PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> {
+let hasNewValue = 1;
+let opNewValue = 0;
+let isPseudo = 1;
+let isCodeGenOnly = 1;
+let DecoderNamespace = "EXT_mmvec";
+let isCodeGenOnly = 1;
+}
+def V6_ldtnpnt0 : HInst<
+(outs VectorRegs:$Vd32),
+(ins PredRegs:$Pv4, IntRegs:$Rt32),
+"if (!$Pv4) $Vd32.tmp = vmem($Rt32):nt",
+PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> {
+let hasNewValue = 1;
+let opNewValue = 0;
+let isPseudo = 1;
+let isCodeGenOnly = 1;
+let DecoderNamespace = "EXT_mmvec";
+}
+def V6_ldtnpnt0_128B : HInst<
+(outs VectorRegs128B:$Vd32),
+(ins PredRegs:$Pv4, IntRegs:$Rt32),
+"if (!$Pv4) $Vd32.tmp = vmem($Rt32):nt",
+PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> {
+let hasNewValue = 1;
+let opNewValue = 0;
+let isPseudo = 1;
+let isCodeGenOnly = 1;
+let DecoderNamespace = "EXT_mmvec";
+let isCodeGenOnly = 1;
+}
+def V6_ldtp0 : HInst<
+(outs VectorRegs:$Vd32),
+(ins PredRegs:$Pv4, IntRegs:$Rt32),
+"if ($Pv4) $Vd32.tmp = vmem($Rt32)",
+PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> {
+let hasNewValue = 1;
+let opNewValue = 0;
+let isPseudo = 1;
+let isCodeGenOnly = 1;
+let DecoderNamespace = "EXT_mmvec";
+}
+def V6_ldtp0_128B : HInst<
+(outs VectorRegs128B:$Vd32),
+(ins PredRegs:$Pv4, IntRegs:$Rt32),
+"if ($Pv4) $Vd32.tmp = vmem($Rt32)",
+PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> {
+let hasNewValue = 1;
+let opNewValue = 0;
+let isPseudo = 1;
+let isCodeGenOnly = 1;
+let DecoderNamespace = "EXT_mmvec";
+let isCodeGenOnly = 1;
+}
+def V6_ldtpnt0 : HInst<
+(outs VectorRegs:$Vd32),
+(ins PredRegs:$Pv4, IntRegs:$Rt32),
+"if ($Pv4) $Vd32.tmp = vmem($Rt32):nt",
+PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> {
+let hasNewValue = 1;
+let opNewValue = 0;
+let isPseudo = 1;
+let isCodeGenOnly = 1;
+let DecoderNamespace = "EXT_mmvec";
+}
+def V6_ldtpnt0_128B : HInst<
+(outs VectorRegs128B:$Vd32),
+(ins PredRegs:$Pv4, IntRegs:$Rt32),
+"if ($Pv4) $Vd32.tmp = vmem($Rt32):nt",
+PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> {
+let hasNewValue = 1;
+let opNewValue = 0;
+let isPseudo = 1;
+let isCodeGenOnly = 1;
+let DecoderNamespace = "EXT_mmvec";
+let isCodeGenOnly = 1;
+}
def V6_ldu0 : HInst<
(outs VectorRegs:$Vd32),
(ins IntRegs:$Rt32),
@@ -25922,7 +26283,7 @@ def V6_lvsplatb : HInst<
(outs VectorRegs:$Vd32),
(ins IntRegs:$Rt32),
"$Vd32.b = vsplat($Rt32)",
-CVI_VX, TypeCVI_VX>, Enc_9768377, Requires<[HasV62T,UseHVX]> {
+tc_6b78cf13, TypeCVI_VX>, Enc_a5ed8a, Requires<[HasV62T,UseHVX]> {
let Inst{13-5} = 0b000000010;
let Inst{31-21} = 0b00011001110;
let hasNewValue = 1;
@@ -25933,7 +26294,7 @@ def V6_lvsplatb_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins IntRegs:$Rt32),
"$Vd32.b = vsplat($Rt32)",
-CVI_VX, TypeCVI_VX>, Enc_9768377, Requires<[HasV62T,UseHVX]> {
+tc_6b78cf13, TypeCVI_VX>, Enc_a5ed8a, Requires<[HasV62T,UseHVX]> {
let Inst{13-5} = 0b000000010;
let Inst{31-21} = 0b00011001110;
let hasNewValue = 1;
@@ -25945,7 +26306,7 @@ def V6_lvsplath : HInst<
(outs VectorRegs:$Vd32),
(ins IntRegs:$Rt32),
"$Vd32.h = vsplat($Rt32)",
-CVI_VX, TypeCVI_VX>, Enc_9768377, Requires<[HasV62T,UseHVX]> {
+tc_6b78cf13, TypeCVI_VX>, Enc_a5ed8a, Requires<[HasV62T,UseHVX]> {
let Inst{13-5} = 0b000000001;
let Inst{31-21} = 0b00011001110;
let hasNewValue = 1;
@@ -25956,7 +26317,7 @@ def V6_lvsplath_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins IntRegs:$Rt32),
"$Vd32.h = vsplat($Rt32)",
-CVI_VX, TypeCVI_VX>, Enc_9768377, Requires<[HasV62T,UseHVX]> {
+tc_6b78cf13, TypeCVI_VX>, Enc_a5ed8a, Requires<[HasV62T,UseHVX]> {
let Inst{13-5} = 0b000000001;
let Inst{31-21} = 0b00011001110;
let hasNewValue = 1;
@@ -25968,7 +26329,7 @@ def V6_lvsplatw : HInst<
(outs VectorRegs:$Vd32),
(ins IntRegs:$Rt32),
"$Vd32 = vsplat($Rt32)",
-CVI_VX_LATE, TypeCVI_VX>, Enc_9768377, Requires<[HasV60T,UseHVX]> {
+tc_6b78cf13, TypeCVI_VX_LATE>, Enc_a5ed8a, Requires<[HasV60T,UseHVX]> {
let Inst{13-5} = 0b000000001;
let Inst{31-21} = 0b00011001101;
let hasNewValue = 1;
@@ -25979,7 +26340,7 @@ def V6_lvsplatw_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins IntRegs:$Rt32),
"$Vd32 = vsplat($Rt32)",
-CVI_VX_LATE, TypeCVI_VX>, Enc_9768377, Requires<[HasV60T,UseHVX]> {
+tc_6b78cf13, TypeCVI_VX_LATE>, Enc_a5ed8a, Requires<[HasV60T,UseHVX]> {
let Inst{13-5} = 0b000000001;
let Inst{31-21} = 0b00011001101;
let hasNewValue = 1;
@@ -25991,7 +26352,7 @@ def V6_pred_and : HInst<
(outs VecPredRegs:$Qd4),
(ins VecPredRegs:$Qs4, VecPredRegs:$Qt4),
"$Qd4 = and($Qs4,$Qt4)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_6091631, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_134437, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000000;
let Inst{13-10} = 0b0000;
let Inst{21-16} = 0b000011;
@@ -26004,7 +26365,7 @@ def V6_pred_and_128B : HInst<
(outs VecPredRegs128B:$Qd4),
(ins VecPredRegs128B:$Qs4, VecPredRegs128B:$Qt4),
"$Qd4 = and($Qs4,$Qt4)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_6091631, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_134437, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000000;
let Inst{13-10} = 0b0000;
let Inst{21-16} = 0b000011;
@@ -26018,7 +26379,7 @@ def V6_pred_and_n : HInst<
(outs VecPredRegs:$Qd4),
(ins VecPredRegs:$Qs4, VecPredRegs:$Qt4),
"$Qd4 = and($Qs4,!$Qt4)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_6091631, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_134437, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000101;
let Inst{13-10} = 0b0000;
let Inst{21-16} = 0b000011;
@@ -26031,7 +26392,7 @@ def V6_pred_and_n_128B : HInst<
(outs VecPredRegs128B:$Qd4),
(ins VecPredRegs128B:$Qs4, VecPredRegs128B:$Qt4),
"$Qd4 = and($Qs4,!$Qt4)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_6091631, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_134437, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000101;
let Inst{13-10} = 0b0000;
let Inst{21-16} = 0b000011;
@@ -26045,7 +26406,7 @@ def V6_pred_not : HInst<
(outs VecPredRegs:$Qd4),
(ins VecPredRegs:$Qs4),
"$Qd4 = not($Qs4)",
-CVI_VA, TypeCVI_VA>, Enc_4897205, Requires<[HasV60T,UseHVX]> {
+tc_71337255, TypeCVI_VA>, Enc_bfbf03, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000010;
let Inst{13-10} = 0b0000;
let Inst{31-16} = 0b0001111000000011;
@@ -26057,7 +26418,7 @@ def V6_pred_not_128B : HInst<
(outs VecPredRegs128B:$Qd4),
(ins VecPredRegs128B:$Qs4),
"$Qd4 = not($Qs4)",
-CVI_VA, TypeCVI_VA>, Enc_4897205, Requires<[HasV60T,UseHVX]> {
+tc_71337255, TypeCVI_VA>, Enc_bfbf03, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000010;
let Inst{13-10} = 0b0000;
let Inst{31-16} = 0b0001111000000011;
@@ -26070,7 +26431,7 @@ def V6_pred_or : HInst<
(outs VecPredRegs:$Qd4),
(ins VecPredRegs:$Qs4, VecPredRegs:$Qt4),
"$Qd4 = or($Qs4,$Qt4)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_6091631, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_134437, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000001;
let Inst{13-10} = 0b0000;
let Inst{21-16} = 0b000011;
@@ -26083,7 +26444,7 @@ def V6_pred_or_128B : HInst<
(outs VecPredRegs128B:$Qd4),
(ins VecPredRegs128B:$Qs4, VecPredRegs128B:$Qt4),
"$Qd4 = or($Qs4,$Qt4)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_6091631, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_134437, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000001;
let Inst{13-10} = 0b0000;
let Inst{21-16} = 0b000011;
@@ -26097,7 +26458,7 @@ def V6_pred_or_n : HInst<
(outs VecPredRegs:$Qd4),
(ins VecPredRegs:$Qs4, VecPredRegs:$Qt4),
"$Qd4 = or($Qs4,!$Qt4)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_6091631, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_134437, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000100;
let Inst{13-10} = 0b0000;
let Inst{21-16} = 0b000011;
@@ -26110,7 +26471,7 @@ def V6_pred_or_n_128B : HInst<
(outs VecPredRegs128B:$Qd4),
(ins VecPredRegs128B:$Qs4, VecPredRegs128B:$Qt4),
"$Qd4 = or($Qs4,!$Qt4)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_6091631, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_134437, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000100;
let Inst{13-10} = 0b0000;
let Inst{21-16} = 0b000011;
@@ -26124,7 +26485,7 @@ def V6_pred_scalar2 : HInst<
(outs VecPredRegs:$Qd4),
(ins IntRegs:$Rt32),
"$Qd4 = vsetq($Rt32)",
-CVI_VP_LONG, TypeCVI_VP>, Enc_12781442, Requires<[HasV60T,UseHVX]> {
+tc_4105d6b5, TypeCVI_VP>, Enc_7222b7, Requires<[HasV60T,UseHVX]> {
let Inst{13-2} = 0b000000010001;
let Inst{31-21} = 0b00011001101;
let hasNewValue = 1;
@@ -26135,7 +26496,7 @@ def V6_pred_scalar2_128B : HInst<
(outs VecPredRegs128B:$Qd4),
(ins IntRegs:$Rt32),
"$Qd4 = vsetq($Rt32)",
-CVI_VP_LONG, TypeCVI_VP>, Enc_12781442, Requires<[HasV60T,UseHVX]> {
+tc_4105d6b5, TypeCVI_VP>, Enc_7222b7, Requires<[HasV60T,UseHVX]> {
let Inst{13-2} = 0b000000010001;
let Inst{31-21} = 0b00011001101;
let hasNewValue = 1;
@@ -26147,7 +26508,7 @@ def V6_pred_scalar2v2 : HInst<
(outs VecPredRegs:$Qd4),
(ins IntRegs:$Rt32),
"$Qd4 = vsetq2($Rt32)",
-CVI_VP_LONG, TypeCVI_VP>, Enc_12781442, Requires<[HasV62T,UseHVX]> {
+tc_4105d6b5, TypeCVI_VP>, Enc_7222b7, Requires<[HasV62T,UseHVX]> {
let Inst{13-2} = 0b000000010011;
let Inst{31-21} = 0b00011001101;
let hasNewValue = 1;
@@ -26158,7 +26519,7 @@ def V6_pred_scalar2v2_128B : HInst<
(outs VecPredRegs128B:$Qd4),
(ins IntRegs:$Rt32),
"$Qd4 = vsetq2($Rt32)",
-CVI_VP_LONG, TypeCVI_VP>, Enc_12781442, Requires<[HasV62T,UseHVX]> {
+tc_4105d6b5, TypeCVI_VP>, Enc_7222b7, Requires<[HasV62T,UseHVX]> {
let Inst{13-2} = 0b000000010011;
let Inst{31-21} = 0b00011001101;
let hasNewValue = 1;
@@ -26170,7 +26531,7 @@ def V6_pred_xor : HInst<
(outs VecPredRegs:$Qd4),
(ins VecPredRegs:$Qs4, VecPredRegs:$Qt4),
"$Qd4 = xor($Qs4,$Qt4)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_6091631, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_134437, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000011;
let Inst{13-10} = 0b0000;
let Inst{21-16} = 0b000011;
@@ -26183,7 +26544,7 @@ def V6_pred_xor_128B : HInst<
(outs VecPredRegs128B:$Qd4),
(ins VecPredRegs128B:$Qs4, VecPredRegs128B:$Qt4),
"$Qd4 = xor($Qs4,$Qt4)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_6091631, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_134437, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000011;
let Inst{13-10} = 0b0000;
let Inst{21-16} = 0b000011;
@@ -26197,7 +26558,7 @@ def V6_shuffeqh : HInst<
(outs VecPredRegs:$Qd4),
(ins VecPredRegs:$Qs4, VecPredRegs:$Qt4),
"$Qd4.b = vshuffe($Qs4.h,$Qt4.h)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_6091631, Requires<[HasV62T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_134437, Requires<[HasV62T,UseHVX]> {
let Inst{7-2} = 0b000110;
let Inst{13-10} = 0b0000;
let Inst{21-16} = 0b000011;
@@ -26210,7 +26571,7 @@ def V6_shuffeqh_128B : HInst<
(outs VecPredRegs128B:$Qd4),
(ins VecPredRegs128B:$Qs4, VecPredRegs128B:$Qt4),
"$Qd4.b = vshuffe($Qs4.h,$Qt4.h)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_6091631, Requires<[HasV62T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_134437, Requires<[HasV62T,UseHVX]> {
let Inst{7-2} = 0b000110;
let Inst{13-10} = 0b0000;
let Inst{21-16} = 0b000011;
@@ -26224,7 +26585,7 @@ def V6_shuffeqw : HInst<
(outs VecPredRegs:$Qd4),
(ins VecPredRegs:$Qs4, VecPredRegs:$Qt4),
"$Qd4.h = vshuffe($Qs4.w,$Qt4.w)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_6091631, Requires<[HasV62T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_134437, Requires<[HasV62T,UseHVX]> {
let Inst{7-2} = 0b000111;
let Inst{13-10} = 0b0000;
let Inst{21-16} = 0b000011;
@@ -26237,7 +26598,7 @@ def V6_shuffeqw_128B : HInst<
(outs VecPredRegs128B:$Qd4),
(ins VecPredRegs128B:$Qs4, VecPredRegs128B:$Qt4),
"$Qd4.h = vshuffe($Qs4.w,$Qt4.w)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_6091631, Requires<[HasV62T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_134437, Requires<[HasV62T,UseHVX]> {
let Inst{7-2} = 0b000111;
let Inst{13-10} = 0b0000;
let Inst{21-16} = 0b000011;
@@ -26540,7 +26901,7 @@ def V6_vL32Ub_ai : HInst<
(outs VectorRegs:$Vd32),
(ins IntRegs:$Rt32, s4_0Imm:$Ii),
"$Vd32 = vmemu($Rt32+#$Ii)",
-CVI_VM_VP_LDU, TypeCVI_VM_VP_LDU>, Enc_1244745, Requires<[HasV60T,UseHVX]> {
+tc_35e92f8e, TypeCVI_VM_VP_LDU>, Enc_f3f408, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b00101000000;
@@ -26556,7 +26917,7 @@ def V6_vL32Ub_ai_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins IntRegs:$Rt32, s4_0Imm:$Ii),
"$Vd32 = vmemu($Rt32+#$Ii)",
-CVI_VM_VP_LDU, TypeCVI_VM_VP_LDU>, Enc_8437395, Requires<[HasV60T,UseHVX]> {
+tc_35e92f8e, TypeCVI_VM_VP_LDU>, Enc_f3f408, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b00101000000;
@@ -26573,7 +26934,7 @@ def V6_vL32Ub_pi : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s3_0Imm:$Ii),
"$Vd32 = vmemu($Rx32++#$Ii)",
-CVI_VM_VP_LDU, TypeCVI_VM_VP_LDU>, Enc_10039393, Requires<[HasV60T,UseHVX]> {
+tc_4fd8566e, TypeCVI_VM_VP_LDU>, Enc_a255dc, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-11} = 0b000;
let Inst{31-21} = 0b00101001000;
@@ -26590,7 +26951,7 @@ def V6_vL32Ub_pi_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s3_0Imm:$Ii),
"$Vd32 = vmemu($Rx32++#$Ii)",
-CVI_VM_VP_LDU, TypeCVI_VM_VP_LDU>, Enc_11039423, Requires<[HasV60T,UseHVX]> {
+tc_4fd8566e, TypeCVI_VM_VP_LDU>, Enc_a255dc, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-11} = 0b000;
let Inst{31-21} = 0b00101001000;
@@ -26608,7 +26969,7 @@ def V6_vL32Ub_ppu : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Vd32 = vmemu($Rx32++$Mu2)",
-CVI_VM_VP_LDU, TypeCVI_VM_VP_LDU>, Enc_15949334, Requires<[HasV60T,UseHVX]> {
+tc_4fd8566e, TypeCVI_VM_VP_LDU>, Enc_2ebe3b, Requires<[HasV60T,UseHVX]> {
let Inst{12-5} = 0b00000111;
let Inst{31-21} = 0b00101011000;
let hasNewValue = 1;
@@ -26624,7 +26985,7 @@ def V6_vL32Ub_ppu_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Vd32 = vmemu($Rx32++$Mu2)",
-CVI_VM_VP_LDU, TypeCVI_VM_VP_LDU>, Enc_15949334, Requires<[HasV60T,UseHVX]> {
+tc_4fd8566e, TypeCVI_VM_VP_LDU>, Enc_2ebe3b, Requires<[HasV60T,UseHVX]> {
let Inst{12-5} = 0b00000111;
let Inst{31-21} = 0b00101011000;
let hasNewValue = 1;
@@ -26641,7 +27002,7 @@ def V6_vL32b_ai : HInst<
(outs VectorRegs:$Vd32),
(ins IntRegs:$Rt32, s4_0Imm:$Ii),
"$Vd32 = vmem($Rt32+#$Ii)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_1244745, Requires<[HasV60T,UseHVX]> {
+tc_b712833a, TypeCVI_VM_LD>, Enc_f3f408, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b00101000000;
@@ -26658,7 +27019,7 @@ def V6_vL32b_ai_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins IntRegs:$Rt32, s4_0Imm:$Ii),
"$Vd32 = vmem($Rt32+#$Ii)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_8437395, Requires<[HasV60T,UseHVX]> {
+tc_b712833a, TypeCVI_VM_LD>, Enc_f3f408, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b00101000000;
@@ -26676,7 +27037,7 @@ def V6_vL32b_cur_ai : HInst<
(outs VectorRegs:$Vd32),
(ins IntRegs:$Rt32, s4_0Imm:$Ii),
"$Vd32.cur = vmem($Rt32+#$Ii)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_1244745, Requires<[HasV60T,UseHVX]> {
+tc_b712833a, TypeCVI_VM_LD>, Enc_f3f408, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b00101000000;
@@ -26693,7 +27054,7 @@ def V6_vL32b_cur_ai_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins IntRegs:$Rt32, s4_0Imm:$Ii),
"$Vd32.cur = vmem($Rt32+#$Ii)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_8437395, Requires<[HasV60T,UseHVX]> {
+tc_b712833a, TypeCVI_VM_LD>, Enc_f3f408, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b00101000000;
@@ -26711,7 +27072,7 @@ def V6_vL32b_cur_npred_ai : HInst<
(outs VectorRegs:$Vd32),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii),
"if (!$Pv4) $Vd32.cur = vmem($Rt32+#$Ii)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_13338314, Requires<[HasV62T,UseHVX]> {
+tc_5cbf490b, TypeCVI_VM_LD>, Enc_8d8a30, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{31-21} = 0b00101000100;
let isPredicated = 1;
@@ -26729,7 +27090,7 @@ def V6_vL32b_cur_npred_ai_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii),
"if (!$Pv4) $Vd32.cur = vmem($Rt32+#$Ii)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_738356, Requires<[HasV62T,UseHVX]> {
+tc_5cbf490b, TypeCVI_VM_LD>, Enc_8d8a30, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{31-21} = 0b00101000100;
let isPredicated = 1;
@@ -26748,7 +27109,7 @@ def V6_vL32b_cur_npred_pi : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii),
"if (!$Pv4) $Vd32.cur = vmem($Rx32++#$Ii)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_14560494, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_58a8bf, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001100;
@@ -26768,7 +27129,7 @@ def V6_vL32b_cur_npred_pi_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii),
"if (!$Pv4) $Vd32.cur = vmem($Rx32++#$Ii)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_15560488, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_58a8bf, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001100;
@@ -26789,7 +27150,7 @@ def V6_vL32b_cur_npred_ppu : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2),
"if (!$Pv4) $Vd32.cur = vmem($Rx32++$Mu2)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_f8c1c4, Requires<[HasV62T,UseHVX]> {
let Inst{10-5} = 0b000101;
let Inst{31-21} = 0b00101011100;
let isPredicated = 1;
@@ -26808,7 +27169,7 @@ def V6_vL32b_cur_npred_ppu_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2),
"if (!$Pv4) $Vd32.cur = vmem($Rx32++$Mu2)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_f8c1c4, Requires<[HasV62T,UseHVX]> {
let Inst{10-5} = 0b000101;
let Inst{31-21} = 0b00101011100;
let isPredicated = 1;
@@ -26828,7 +27189,7 @@ def V6_vL32b_cur_pi : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s3_0Imm:$Ii),
"$Vd32.cur = vmem($Rx32++#$Ii)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_10039393, Requires<[HasV60T,UseHVX]> {
+tc_eb669007, TypeCVI_VM_LD>, Enc_a255dc, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-11} = 0b000;
let Inst{31-21} = 0b00101001000;
@@ -26846,7 +27207,7 @@ def V6_vL32b_cur_pi_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s3_0Imm:$Ii),
"$Vd32.cur = vmem($Rx32++#$Ii)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_11039423, Requires<[HasV60T,UseHVX]> {
+tc_eb669007, TypeCVI_VM_LD>, Enc_a255dc, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-11} = 0b000;
let Inst{31-21} = 0b00101001000;
@@ -26865,7 +27226,7 @@ def V6_vL32b_cur_ppu : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Vd32.cur = vmem($Rx32++$Mu2)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_15949334, Requires<[HasV60T,UseHVX]> {
+tc_eb669007, TypeCVI_VM_LD>, Enc_2ebe3b, Requires<[HasV60T,UseHVX]> {
let Inst{12-5} = 0b00000001;
let Inst{31-21} = 0b00101011000;
let hasNewValue = 1;
@@ -26882,7 +27243,7 @@ def V6_vL32b_cur_ppu_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Vd32.cur = vmem($Rx32++$Mu2)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_15949334, Requires<[HasV60T,UseHVX]> {
+tc_eb669007, TypeCVI_VM_LD>, Enc_2ebe3b, Requires<[HasV60T,UseHVX]> {
let Inst{12-5} = 0b00000001;
let Inst{31-21} = 0b00101011000;
let hasNewValue = 1;
@@ -26900,7 +27261,7 @@ def V6_vL32b_cur_pred_ai : HInst<
(outs VectorRegs:$Vd32),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii),
"if ($Pv4) $Vd32.cur = vmem($Rt32+#$Ii)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_13338314, Requires<[HasV62T,UseHVX]> {
+tc_5cbf490b, TypeCVI_VM_LD>, Enc_8d8a30, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{31-21} = 0b00101000100;
let isPredicated = 1;
@@ -26917,7 +27278,7 @@ def V6_vL32b_cur_pred_ai_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii),
"if ($Pv4) $Vd32.cur = vmem($Rt32+#$Ii)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_738356, Requires<[HasV62T,UseHVX]> {
+tc_5cbf490b, TypeCVI_VM_LD>, Enc_8d8a30, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{31-21} = 0b00101000100;
let isPredicated = 1;
@@ -26935,7 +27296,7 @@ def V6_vL32b_cur_pred_pi : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii),
"if ($Pv4) $Vd32.cur = vmem($Rx32++#$Ii)",
-CVI_VM_LD, TypeCOPROC_VMEM>, Enc_14560494, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_58a8bf, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001100;
@@ -26954,7 +27315,7 @@ def V6_vL32b_cur_pred_pi_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii),
"if ($Pv4) $Vd32.cur = vmem($Rx32++#$Ii)",
-CVI_VM_LD, TypeCOPROC_VMEM>, Enc_15560488, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_58a8bf, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001100;
@@ -26974,7 +27335,7 @@ def V6_vL32b_cur_pred_ppu : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2),
"if ($Pv4) $Vd32.cur = vmem($Rx32++$Mu2)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_f8c1c4, Requires<[HasV62T,UseHVX]> {
let Inst{10-5} = 0b000100;
let Inst{31-21} = 0b00101011100;
let isPredicated = 1;
@@ -26992,7 +27353,7 @@ def V6_vL32b_cur_pred_ppu_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2),
"if ($Pv4) $Vd32.cur = vmem($Rx32++$Mu2)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_f8c1c4, Requires<[HasV62T,UseHVX]> {
let Inst{10-5} = 0b000100;
let Inst{31-21} = 0b00101011100;
let isPredicated = 1;
@@ -27011,7 +27372,7 @@ def V6_vL32b_npred_ai : HInst<
(outs VectorRegs:$Vd32),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii),
"if (!$Pv4) $Vd32 = vmem($Rt32+#$Ii)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_13338314, Requires<[HasV62T,UseHVX]> {
+tc_5cbf490b, TypeCVI_VM_LD>, Enc_8d8a30, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{31-21} = 0b00101000100;
let isPredicated = 1;
@@ -27028,7 +27389,7 @@ def V6_vL32b_npred_ai_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii),
"if (!$Pv4) $Vd32 = vmem($Rt32+#$Ii)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_738356, Requires<[HasV62T,UseHVX]> {
+tc_5cbf490b, TypeCVI_VM_LD>, Enc_8d8a30, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{31-21} = 0b00101000100;
let isPredicated = 1;
@@ -27046,7 +27407,7 @@ def V6_vL32b_npred_pi : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii),
"if (!$Pv4) $Vd32 = vmem($Rx32++#$Ii)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_14560494, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_58a8bf, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001100;
@@ -27065,7 +27426,7 @@ def V6_vL32b_npred_pi_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii),
"if (!$Pv4) $Vd32 = vmem($Rx32++#$Ii)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_15560488, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_58a8bf, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001100;
@@ -27085,7 +27446,7 @@ def V6_vL32b_npred_ppu : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2),
"if (!$Pv4) $Vd32 = vmem($Rx32++$Mu2)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_f8c1c4, Requires<[HasV62T,UseHVX]> {
let Inst{10-5} = 0b000011;
let Inst{31-21} = 0b00101011100;
let isPredicated = 1;
@@ -27103,7 +27464,7 @@ def V6_vL32b_npred_ppu_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2),
"if (!$Pv4) $Vd32 = vmem($Rx32++$Mu2)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_f8c1c4, Requires<[HasV62T,UseHVX]> {
let Inst{10-5} = 0b000011;
let Inst{31-21} = 0b00101011100;
let isPredicated = 1;
@@ -27122,7 +27483,7 @@ def V6_vL32b_nt_ai : HInst<
(outs VectorRegs:$Vd32),
(ins IntRegs:$Rt32, s4_0Imm:$Ii),
"$Vd32 = vmem($Rt32+#$Ii):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_1244745, Requires<[HasV60T,UseHVX]> {
+tc_b712833a, TypeCVI_VM_LD>, Enc_f3f408, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b00101000010;
@@ -27131,8 +27492,8 @@ let opNewValue = 0;
let addrMode = BaseImmOffset;
let accessSize = Vector64Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let isCVLoadable = 1;
let DecoderNamespace = "EXT_mmvec";
}
@@ -27140,7 +27501,7 @@ def V6_vL32b_nt_ai_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins IntRegs:$Rt32, s4_0Imm:$Ii),
"$Vd32 = vmem($Rt32+#$Ii):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_8437395, Requires<[HasV60T,UseHVX]> {
+tc_b712833a, TypeCVI_VM_LD>, Enc_f3f408, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b00101000010;
@@ -27149,8 +27510,8 @@ let opNewValue = 0;
let addrMode = BaseImmOffset;
let accessSize = Vector128Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let isCVLoadable = 1;
let DecoderNamespace = "EXT_mmvec";
let isCodeGenOnly = 1;
@@ -27159,7 +27520,7 @@ def V6_vL32b_nt_cur_ai : HInst<
(outs VectorRegs:$Vd32),
(ins IntRegs:$Rt32, s4_0Imm:$Ii),
"$Vd32.cur = vmem($Rt32+#$Ii):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_1244745, Requires<[HasV60T,UseHVX]> {
+tc_b712833a, TypeCVI_VM_LD>, Enc_f3f408, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b00101000010;
@@ -27177,7 +27538,7 @@ def V6_vL32b_nt_cur_ai_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins IntRegs:$Rt32, s4_0Imm:$Ii),
"$Vd32.cur = vmem($Rt32+#$Ii):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_8437395, Requires<[HasV60T,UseHVX]> {
+tc_b712833a, TypeCVI_VM_LD>, Enc_f3f408, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b00101000010;
@@ -27196,7 +27557,7 @@ def V6_vL32b_nt_cur_npred_ai : HInst<
(outs VectorRegs:$Vd32),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii),
"if (!$Pv4) $Vd32.cur = vmem($Rt32+#$Ii):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_13338314, Requires<[HasV62T,UseHVX]> {
+tc_5cbf490b, TypeCVI_VM_LD>, Enc_8d8a30, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{31-21} = 0b00101000110;
let isPredicated = 1;
@@ -27215,7 +27576,7 @@ def V6_vL32b_nt_cur_npred_ai_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii),
"if (!$Pv4) $Vd32.cur = vmem($Rt32+#$Ii):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_738356, Requires<[HasV62T,UseHVX]> {
+tc_5cbf490b, TypeCVI_VM_LD>, Enc_8d8a30, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{31-21} = 0b00101000110;
let isPredicated = 1;
@@ -27235,7 +27596,7 @@ def V6_vL32b_nt_cur_npred_pi : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii),
"if (!$Pv4) $Vd32.cur = vmem($Rx32++#$Ii):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_14560494, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_58a8bf, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001110;
@@ -27256,7 +27617,7 @@ def V6_vL32b_nt_cur_npred_pi_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii),
"if (!$Pv4) $Vd32.cur = vmem($Rx32++#$Ii):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_15560488, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_58a8bf, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001110;
@@ -27278,7 +27639,7 @@ def V6_vL32b_nt_cur_npred_ppu : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2),
"if (!$Pv4) $Vd32.cur = vmem($Rx32++$Mu2):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_f8c1c4, Requires<[HasV62T,UseHVX]> {
let Inst{10-5} = 0b000101;
let Inst{31-21} = 0b00101011110;
let isPredicated = 1;
@@ -27298,7 +27659,7 @@ def V6_vL32b_nt_cur_npred_ppu_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2),
"if (!$Pv4) $Vd32.cur = vmem($Rx32++$Mu2):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_f8c1c4, Requires<[HasV62T,UseHVX]> {
let Inst{10-5} = 0b000101;
let Inst{31-21} = 0b00101011110;
let isPredicated = 1;
@@ -27319,7 +27680,7 @@ def V6_vL32b_nt_cur_pi : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s3_0Imm:$Ii),
"$Vd32.cur = vmem($Rx32++#$Ii):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_10039393, Requires<[HasV60T,UseHVX]> {
+tc_eb669007, TypeCVI_VM_LD>, Enc_a255dc, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-11} = 0b000;
let Inst{31-21} = 0b00101001010;
@@ -27338,7 +27699,7 @@ def V6_vL32b_nt_cur_pi_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s3_0Imm:$Ii),
"$Vd32.cur = vmem($Rx32++#$Ii):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_11039423, Requires<[HasV60T,UseHVX]> {
+tc_eb669007, TypeCVI_VM_LD>, Enc_a255dc, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-11} = 0b000;
let Inst{31-21} = 0b00101001010;
@@ -27358,7 +27719,7 @@ def V6_vL32b_nt_cur_ppu : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Vd32.cur = vmem($Rx32++$Mu2):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_15949334, Requires<[HasV60T,UseHVX]> {
+tc_eb669007, TypeCVI_VM_LD>, Enc_2ebe3b, Requires<[HasV60T,UseHVX]> {
let Inst{12-5} = 0b00000001;
let Inst{31-21} = 0b00101011010;
let hasNewValue = 1;
@@ -27376,7 +27737,7 @@ def V6_vL32b_nt_cur_ppu_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Vd32.cur = vmem($Rx32++$Mu2):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_15949334, Requires<[HasV60T,UseHVX]> {
+tc_eb669007, TypeCVI_VM_LD>, Enc_2ebe3b, Requires<[HasV60T,UseHVX]> {
let Inst{12-5} = 0b00000001;
let Inst{31-21} = 0b00101011010;
let hasNewValue = 1;
@@ -27395,7 +27756,7 @@ def V6_vL32b_nt_cur_pred_ai : HInst<
(outs VectorRegs:$Vd32),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii),
"if ($Pv4) $Vd32.cur = vmem($Rt32+#$Ii):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_13338314, Requires<[HasV62T,UseHVX]> {
+tc_5cbf490b, TypeCVI_VM_LD>, Enc_8d8a30, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{31-21} = 0b00101000110;
let isPredicated = 1;
@@ -27413,7 +27774,7 @@ def V6_vL32b_nt_cur_pred_ai_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii),
"if ($Pv4) $Vd32.cur = vmem($Rt32+#$Ii):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_738356, Requires<[HasV62T,UseHVX]> {
+tc_5cbf490b, TypeCVI_VM_LD>, Enc_8d8a30, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{31-21} = 0b00101000110;
let isPredicated = 1;
@@ -27432,7 +27793,7 @@ def V6_vL32b_nt_cur_pred_pi : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii),
"if ($Pv4) $Vd32.cur = vmem($Rx32++#$Ii):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_14560494, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_58a8bf, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001110;
@@ -27452,7 +27813,7 @@ def V6_vL32b_nt_cur_pred_pi_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii),
"if ($Pv4) $Vd32.cur = vmem($Rx32++#$Ii):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_15560488, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_58a8bf, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001110;
@@ -27473,7 +27834,7 @@ def V6_vL32b_nt_cur_pred_ppu : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2),
"if ($Pv4) $Vd32.cur = vmem($Rx32++$Mu2):nt",
-CVI_VM_LD, TypeCOPROC_VMEM>, Enc_3158657, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_f8c1c4, Requires<[HasV62T,UseHVX]> {
let Inst{10-5} = 0b000100;
let Inst{31-21} = 0b00101011110;
let isPredicated = 1;
@@ -27492,7 +27853,7 @@ def V6_vL32b_nt_cur_pred_ppu_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2),
"if ($Pv4) $Vd32.cur = vmem($Rx32++$Mu2):nt",
-CVI_VM_LD, TypeCOPROC_VMEM>, Enc_3158657, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_f8c1c4, Requires<[HasV62T,UseHVX]> {
let Inst{10-5} = 0b000100;
let Inst{31-21} = 0b00101011110;
let isPredicated = 1;
@@ -27512,7 +27873,7 @@ def V6_vL32b_nt_npred_ai : HInst<
(outs VectorRegs:$Vd32),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii),
"if (!$Pv4) $Vd32 = vmem($Rt32+#$Ii):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_13338314, Requires<[HasV62T,UseHVX]> {
+tc_5cbf490b, TypeCVI_VM_LD>, Enc_8d8a30, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{31-21} = 0b00101000110;
let isPredicated = 1;
@@ -27522,15 +27883,15 @@ let opNewValue = 0;
let addrMode = BaseImmOffset;
let accessSize = Vector64Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
}
def V6_vL32b_nt_npred_ai_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii),
"if (!$Pv4) $Vd32 = vmem($Rt32+#$Ii):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_738356, Requires<[HasV62T,UseHVX]> {
+tc_5cbf490b, TypeCVI_VM_LD>, Enc_8d8a30, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{31-21} = 0b00101000110;
let isPredicated = 1;
@@ -27540,8 +27901,8 @@ let opNewValue = 0;
let addrMode = BaseImmOffset;
let accessSize = Vector128Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
let isCodeGenOnly = 1;
}
@@ -27549,7 +27910,7 @@ def V6_vL32b_nt_npred_pi : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii),
"if (!$Pv4) $Vd32 = vmem($Rx32++#$Ii):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_14560494, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_58a8bf, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001110;
@@ -27560,8 +27921,8 @@ let opNewValue = 0;
let addrMode = PostInc;
let accessSize = Vector64Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
let Constraints = "$Rx32 = $Rx32in";
}
@@ -27569,7 +27930,7 @@ def V6_vL32b_nt_npred_pi_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii),
"if (!$Pv4) $Vd32 = vmem($Rx32++#$Ii):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_15560488, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_58a8bf, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001110;
@@ -27580,8 +27941,8 @@ let opNewValue = 0;
let addrMode = PostInc;
let accessSize = Vector128Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
let isCodeGenOnly = 1;
let Constraints = "$Rx32 = $Rx32in";
@@ -27590,7 +27951,7 @@ def V6_vL32b_nt_npred_ppu : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2),
"if (!$Pv4) $Vd32 = vmem($Rx32++$Mu2):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_f8c1c4, Requires<[HasV62T,UseHVX]> {
let Inst{10-5} = 0b000011;
let Inst{31-21} = 0b00101011110;
let isPredicated = 1;
@@ -27600,8 +27961,8 @@ let opNewValue = 0;
let addrMode = PostInc;
let accessSize = Vector64Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
let Constraints = "$Rx32 = $Rx32in";
}
@@ -27609,7 +27970,7 @@ def V6_vL32b_nt_npred_ppu_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2),
"if (!$Pv4) $Vd32 = vmem($Rx32++$Mu2):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_f8c1c4, Requires<[HasV62T,UseHVX]> {
let Inst{10-5} = 0b000011;
let Inst{31-21} = 0b00101011110;
let isPredicated = 1;
@@ -27619,8 +27980,8 @@ let opNewValue = 0;
let addrMode = PostInc;
let accessSize = Vector128Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
let isCodeGenOnly = 1;
let Constraints = "$Rx32 = $Rx32in";
@@ -27629,7 +27990,7 @@ def V6_vL32b_nt_pi : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s3_0Imm:$Ii),
"$Vd32 = vmem($Rx32++#$Ii):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_10039393, Requires<[HasV60T,UseHVX]> {
+tc_eb669007, TypeCVI_VM_LD>, Enc_a255dc, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-11} = 0b000;
let Inst{31-21} = 0b00101001010;
@@ -27638,8 +27999,8 @@ let opNewValue = 0;
let addrMode = PostInc;
let accessSize = Vector64Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let isCVLoadable = 1;
let DecoderNamespace = "EXT_mmvec";
let Constraints = "$Rx32 = $Rx32in";
@@ -27648,7 +28009,7 @@ def V6_vL32b_nt_pi_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s3_0Imm:$Ii),
"$Vd32 = vmem($Rx32++#$Ii):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_11039423, Requires<[HasV60T,UseHVX]> {
+tc_eb669007, TypeCVI_VM_LD>, Enc_a255dc, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-11} = 0b000;
let Inst{31-21} = 0b00101001010;
@@ -27657,8 +28018,8 @@ let opNewValue = 0;
let addrMode = PostInc;
let accessSize = Vector128Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let isCVLoadable = 1;
let DecoderNamespace = "EXT_mmvec";
let isCodeGenOnly = 1;
@@ -27668,7 +28029,7 @@ def V6_vL32b_nt_ppu : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Vd32 = vmem($Rx32++$Mu2):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_15949334, Requires<[HasV60T,UseHVX]> {
+tc_eb669007, TypeCVI_VM_LD>, Enc_2ebe3b, Requires<[HasV60T,UseHVX]> {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b00101011010;
let hasNewValue = 1;
@@ -27676,8 +28037,8 @@ let opNewValue = 0;
let addrMode = PostInc;
let accessSize = Vector64Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let isCVLoadable = 1;
let DecoderNamespace = "EXT_mmvec";
let Constraints = "$Rx32 = $Rx32in";
@@ -27686,7 +28047,7 @@ def V6_vL32b_nt_ppu_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Vd32 = vmem($Rx32++$Mu2):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_15949334, Requires<[HasV60T,UseHVX]> {
+tc_eb669007, TypeCVI_VM_LD>, Enc_2ebe3b, Requires<[HasV60T,UseHVX]> {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b00101011010;
let hasNewValue = 1;
@@ -27694,8 +28055,8 @@ let opNewValue = 0;
let addrMode = PostInc;
let accessSize = Vector128Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let isCVLoadable = 1;
let DecoderNamespace = "EXT_mmvec";
let isCodeGenOnly = 1;
@@ -27705,7 +28066,7 @@ def V6_vL32b_nt_pred_ai : HInst<
(outs VectorRegs:$Vd32),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii),
"if ($Pv4) $Vd32 = vmem($Rt32+#$Ii):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_13338314, Requires<[HasV62T,UseHVX]> {
+tc_5cbf490b, TypeCVI_VM_LD>, Enc_8d8a30, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{31-21} = 0b00101000110;
let isPredicated = 1;
@@ -27714,15 +28075,15 @@ let opNewValue = 0;
let addrMode = BaseImmOffset;
let accessSize = Vector64Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
}
def V6_vL32b_nt_pred_ai_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii),
"if ($Pv4) $Vd32 = vmem($Rt32+#$Ii):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_738356, Requires<[HasV62T,UseHVX]> {
+tc_5cbf490b, TypeCVI_VM_LD>, Enc_8d8a30, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{31-21} = 0b00101000110;
let isPredicated = 1;
@@ -27731,8 +28092,8 @@ let opNewValue = 0;
let addrMode = BaseImmOffset;
let accessSize = Vector128Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
let isCodeGenOnly = 1;
}
@@ -27740,7 +28101,7 @@ def V6_vL32b_nt_pred_pi : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii),
"if ($Pv4) $Vd32 = vmem($Rx32++#$Ii):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_14560494, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_58a8bf, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001110;
@@ -27750,8 +28111,8 @@ let opNewValue = 0;
let addrMode = PostInc;
let accessSize = Vector64Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
let Constraints = "$Rx32 = $Rx32in";
}
@@ -27759,7 +28120,7 @@ def V6_vL32b_nt_pred_pi_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii),
"if ($Pv4) $Vd32 = vmem($Rx32++#$Ii):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_15560488, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_58a8bf, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001110;
@@ -27769,8 +28130,8 @@ let opNewValue = 0;
let addrMode = PostInc;
let accessSize = Vector128Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
let isCodeGenOnly = 1;
let Constraints = "$Rx32 = $Rx32in";
@@ -27779,7 +28140,7 @@ def V6_vL32b_nt_pred_ppu : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2),
"if ($Pv4) $Vd32 = vmem($Rx32++$Mu2):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_f8c1c4, Requires<[HasV62T,UseHVX]> {
let Inst{10-5} = 0b000010;
let Inst{31-21} = 0b00101011110;
let isPredicated = 1;
@@ -27788,8 +28149,8 @@ let opNewValue = 0;
let addrMode = PostInc;
let accessSize = Vector64Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
let Constraints = "$Rx32 = $Rx32in";
}
@@ -27797,7 +28158,7 @@ def V6_vL32b_nt_pred_ppu_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2),
"if ($Pv4) $Vd32 = vmem($Rx32++$Mu2):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_f8c1c4, Requires<[HasV62T,UseHVX]> {
let Inst{10-5} = 0b000010;
let Inst{31-21} = 0b00101011110;
let isPredicated = 1;
@@ -27806,8 +28167,8 @@ let opNewValue = 0;
let addrMode = PostInc;
let accessSize = Vector128Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
let isCodeGenOnly = 1;
let Constraints = "$Rx32 = $Rx32in";
@@ -27816,7 +28177,7 @@ def V6_vL32b_nt_tmp_ai : HInst<
(outs VectorRegs:$Vd32),
(ins IntRegs:$Rt32, s4_0Imm:$Ii),
"$Vd32.tmp = vmem($Rt32+#$Ii):nt",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_1244745, Requires<[HasV60T,UseHVX]> {
+tc_77a4c701, TypeCVI_VM_TMP_LD>, Enc_f3f408, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b00101000010;
@@ -27825,15 +28186,15 @@ let opNewValue = 0;
let addrMode = BaseImmOffset;
let accessSize = Vector64Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
}
def V6_vL32b_nt_tmp_ai_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins IntRegs:$Rt32, s4_0Imm:$Ii),
"$Vd32.tmp = vmem($Rt32+#$Ii):nt",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_8437395, Requires<[HasV60T,UseHVX]> {
+tc_77a4c701, TypeCVI_VM_TMP_LD>, Enc_f3f408, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b00101000010;
@@ -27842,8 +28203,8 @@ let opNewValue = 0;
let addrMode = BaseImmOffset;
let accessSize = Vector128Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
let isCodeGenOnly = 1;
}
@@ -27851,7 +28212,7 @@ def V6_vL32b_nt_tmp_npred_ai : HInst<
(outs VectorRegs:$Vd32),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii),
"if (!$Pv4) $Vd32.tmp = vmem($Rt32+#$Ii):nt",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_13338314, Requires<[HasV62T,UseHVX]> {
+tc_51cd3aab, TypeCVI_VM_TMP_LD>, Enc_8d8a30, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{31-21} = 0b00101000110;
let isPredicated = 1;
@@ -27861,15 +28222,15 @@ let opNewValue = 0;
let addrMode = BaseImmOffset;
let accessSize = Vector64Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
}
def V6_vL32b_nt_tmp_npred_ai_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii),
"if (!$Pv4) $Vd32.tmp = vmem($Rt32+#$Ii):nt",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_738356, Requires<[HasV62T,UseHVX]> {
+tc_51cd3aab, TypeCVI_VM_TMP_LD>, Enc_8d8a30, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{31-21} = 0b00101000110;
let isPredicated = 1;
@@ -27879,8 +28240,8 @@ let opNewValue = 0;
let addrMode = BaseImmOffset;
let accessSize = Vector128Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
let isCodeGenOnly = 1;
}
@@ -27888,7 +28249,7 @@ def V6_vL32b_nt_tmp_npred_pi : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii),
"if (!$Pv4) $Vd32.tmp = vmem($Rx32++#$Ii):nt",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_14560494, Requires<[HasV62T,UseHVX]> {
+tc_38208312, TypeCVI_VM_TMP_LD>, Enc_58a8bf, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001110;
@@ -27899,8 +28260,8 @@ let opNewValue = 0;
let addrMode = PostInc;
let accessSize = Vector64Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
let Constraints = "$Rx32 = $Rx32in";
}
@@ -27908,7 +28269,7 @@ def V6_vL32b_nt_tmp_npred_pi_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii),
"if (!$Pv4) $Vd32.tmp = vmem($Rx32++#$Ii):nt",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_15560488, Requires<[HasV62T,UseHVX]> {
+tc_38208312, TypeCVI_VM_TMP_LD>, Enc_58a8bf, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001110;
@@ -27919,8 +28280,8 @@ let opNewValue = 0;
let addrMode = PostInc;
let accessSize = Vector128Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
let isCodeGenOnly = 1;
let Constraints = "$Rx32 = $Rx32in";
@@ -27929,7 +28290,7 @@ def V6_vL32b_nt_tmp_npred_ppu : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2),
"if (!$Pv4) $Vd32.tmp = vmem($Rx32++$Mu2):nt",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> {
+tc_38208312, TypeCVI_VM_TMP_LD>, Enc_f8c1c4, Requires<[HasV62T,UseHVX]> {
let Inst{10-5} = 0b000111;
let Inst{31-21} = 0b00101011110;
let isPredicated = 1;
@@ -27939,8 +28300,8 @@ let opNewValue = 0;
let addrMode = PostInc;
let accessSize = Vector64Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
let Constraints = "$Rx32 = $Rx32in";
}
@@ -27948,7 +28309,7 @@ def V6_vL32b_nt_tmp_npred_ppu_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2),
"if (!$Pv4) $Vd32.tmp = vmem($Rx32++$Mu2):nt",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> {
+tc_38208312, TypeCVI_VM_TMP_LD>, Enc_f8c1c4, Requires<[HasV62T,UseHVX]> {
let Inst{10-5} = 0b000111;
let Inst{31-21} = 0b00101011110;
let isPredicated = 1;
@@ -27958,8 +28319,8 @@ let opNewValue = 0;
let addrMode = PostInc;
let accessSize = Vector128Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
let isCodeGenOnly = 1;
let Constraints = "$Rx32 = $Rx32in";
@@ -27968,7 +28329,7 @@ def V6_vL32b_nt_tmp_pi : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s3_0Imm:$Ii),
"$Vd32.tmp = vmem($Rx32++#$Ii):nt",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_10039393, Requires<[HasV60T,UseHVX]> {
+tc_9c267309, TypeCVI_VM_TMP_LD>, Enc_a255dc, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-11} = 0b000;
let Inst{31-21} = 0b00101001010;
@@ -27977,8 +28338,8 @@ let opNewValue = 0;
let addrMode = PostInc;
let accessSize = Vector64Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
let Constraints = "$Rx32 = $Rx32in";
}
@@ -27986,7 +28347,7 @@ def V6_vL32b_nt_tmp_pi_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s3_0Imm:$Ii),
"$Vd32.tmp = vmem($Rx32++#$Ii):nt",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_11039423, Requires<[HasV60T,UseHVX]> {
+tc_9c267309, TypeCVI_VM_TMP_LD>, Enc_a255dc, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-11} = 0b000;
let Inst{31-21} = 0b00101001010;
@@ -27995,8 +28356,8 @@ let opNewValue = 0;
let addrMode = PostInc;
let accessSize = Vector128Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
let isCodeGenOnly = 1;
let Constraints = "$Rx32 = $Rx32in";
@@ -28005,7 +28366,7 @@ def V6_vL32b_nt_tmp_ppu : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Vd32.tmp = vmem($Rx32++$Mu2):nt",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_15949334, Requires<[HasV60T,UseHVX]> {
+tc_9c267309, TypeCVI_VM_TMP_LD>, Enc_2ebe3b, Requires<[HasV60T,UseHVX]> {
let Inst{12-5} = 0b00000010;
let Inst{31-21} = 0b00101011010;
let hasNewValue = 1;
@@ -28013,8 +28374,8 @@ let opNewValue = 0;
let addrMode = PostInc;
let accessSize = Vector64Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
let Constraints = "$Rx32 = $Rx32in";
}
@@ -28022,7 +28383,7 @@ def V6_vL32b_nt_tmp_ppu_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Vd32.tmp = vmem($Rx32++$Mu2):nt",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_15949334, Requires<[HasV60T,UseHVX]> {
+tc_9c267309, TypeCVI_VM_TMP_LD>, Enc_2ebe3b, Requires<[HasV60T,UseHVX]> {
let Inst{12-5} = 0b00000010;
let Inst{31-21} = 0b00101011010;
let hasNewValue = 1;
@@ -28030,8 +28391,8 @@ let opNewValue = 0;
let addrMode = PostInc;
let accessSize = Vector128Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
let isCodeGenOnly = 1;
let Constraints = "$Rx32 = $Rx32in";
@@ -28040,7 +28401,7 @@ def V6_vL32b_nt_tmp_pred_ai : HInst<
(outs VectorRegs:$Vd32),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii),
"if ($Pv4) $Vd32.tmp = vmem($Rt32+#$Ii):nt",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_13338314, Requires<[HasV62T,UseHVX]> {
+tc_51cd3aab, TypeCVI_VM_TMP_LD>, Enc_8d8a30, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{31-21} = 0b00101000110;
let isPredicated = 1;
@@ -28049,15 +28410,15 @@ let opNewValue = 0;
let addrMode = BaseImmOffset;
let accessSize = Vector64Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
}
def V6_vL32b_nt_tmp_pred_ai_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii),
"if ($Pv4) $Vd32.tmp = vmem($Rt32+#$Ii):nt",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_738356, Requires<[HasV62T,UseHVX]> {
+tc_51cd3aab, TypeCVI_VM_TMP_LD>, Enc_8d8a30, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{31-21} = 0b00101000110;
let isPredicated = 1;
@@ -28066,8 +28427,8 @@ let opNewValue = 0;
let addrMode = BaseImmOffset;
let accessSize = Vector128Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
let isCodeGenOnly = 1;
}
@@ -28075,7 +28436,7 @@ def V6_vL32b_nt_tmp_pred_pi : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii),
"if ($Pv4) $Vd32.tmp = vmem($Rx32++#$Ii):nt",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_14560494, Requires<[HasV62T,UseHVX]> {
+tc_38208312, TypeCVI_VM_TMP_LD>, Enc_58a8bf, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001110;
@@ -28085,8 +28446,8 @@ let opNewValue = 0;
let addrMode = PostInc;
let accessSize = Vector64Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
let Constraints = "$Rx32 = $Rx32in";
}
@@ -28094,7 +28455,7 @@ def V6_vL32b_nt_tmp_pred_pi_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii),
"if ($Pv4) $Vd32.tmp = vmem($Rx32++#$Ii):nt",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_15560488, Requires<[HasV62T,UseHVX]> {
+tc_38208312, TypeCVI_VM_TMP_LD>, Enc_58a8bf, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001110;
@@ -28104,8 +28465,8 @@ let opNewValue = 0;
let addrMode = PostInc;
let accessSize = Vector128Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
let isCodeGenOnly = 1;
let Constraints = "$Rx32 = $Rx32in";
@@ -28114,7 +28475,7 @@ def V6_vL32b_nt_tmp_pred_ppu : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2),
"if ($Pv4) $Vd32.tmp = vmem($Rx32++$Mu2):nt",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> {
+tc_38208312, TypeCVI_VM_TMP_LD>, Enc_f8c1c4, Requires<[HasV62T,UseHVX]> {
let Inst{10-5} = 0b000110;
let Inst{31-21} = 0b00101011110;
let isPredicated = 1;
@@ -28123,8 +28484,8 @@ let opNewValue = 0;
let addrMode = PostInc;
let accessSize = Vector64Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
let Constraints = "$Rx32 = $Rx32in";
}
@@ -28132,7 +28493,7 @@ def V6_vL32b_nt_tmp_pred_ppu_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2),
"if ($Pv4) $Vd32.tmp = vmem($Rx32++$Mu2):nt",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> {
+tc_38208312, TypeCVI_VM_TMP_LD>, Enc_f8c1c4, Requires<[HasV62T,UseHVX]> {
let Inst{10-5} = 0b000110;
let Inst{31-21} = 0b00101011110;
let isPredicated = 1;
@@ -28141,8 +28502,8 @@ let opNewValue = 0;
let addrMode = PostInc;
let accessSize = Vector128Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
let isCodeGenOnly = 1;
let Constraints = "$Rx32 = $Rx32in";
@@ -28151,7 +28512,7 @@ def V6_vL32b_pi : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s3_0Imm:$Ii),
"$Vd32 = vmem($Rx32++#$Ii)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_10039393, Requires<[HasV60T,UseHVX]> {
+tc_eb669007, TypeCVI_VM_LD>, Enc_a255dc, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-11} = 0b000;
let Inst{31-21} = 0b00101001000;
@@ -28169,7 +28530,7 @@ def V6_vL32b_pi_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s3_0Imm:$Ii),
"$Vd32 = vmem($Rx32++#$Ii)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_11039423, Requires<[HasV60T,UseHVX]> {
+tc_eb669007, TypeCVI_VM_LD>, Enc_a255dc, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-11} = 0b000;
let Inst{31-21} = 0b00101001000;
@@ -28188,7 +28549,7 @@ def V6_vL32b_ppu : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Vd32 = vmem($Rx32++$Mu2)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_15949334, Requires<[HasV60T,UseHVX]> {
+tc_eb669007, TypeCVI_VM_LD>, Enc_2ebe3b, Requires<[HasV60T,UseHVX]> {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b00101011000;
let hasNewValue = 1;
@@ -28205,7 +28566,7 @@ def V6_vL32b_ppu_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Vd32 = vmem($Rx32++$Mu2)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_15949334, Requires<[HasV60T,UseHVX]> {
+tc_eb669007, TypeCVI_VM_LD>, Enc_2ebe3b, Requires<[HasV60T,UseHVX]> {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b00101011000;
let hasNewValue = 1;
@@ -28223,7 +28584,7 @@ def V6_vL32b_pred_ai : HInst<
(outs VectorRegs:$Vd32),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii),
"if ($Pv4) $Vd32 = vmem($Rt32+#$Ii)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_13338314, Requires<[HasV62T,UseHVX]> {
+tc_5cbf490b, TypeCVI_VM_LD>, Enc_8d8a30, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{31-21} = 0b00101000100;
let isPredicated = 1;
@@ -28239,7 +28600,7 @@ def V6_vL32b_pred_ai_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii),
"if ($Pv4) $Vd32 = vmem($Rt32+#$Ii)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_738356, Requires<[HasV62T,UseHVX]> {
+tc_5cbf490b, TypeCVI_VM_LD>, Enc_8d8a30, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{31-21} = 0b00101000100;
let isPredicated = 1;
@@ -28256,7 +28617,7 @@ def V6_vL32b_pred_pi : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii),
"if ($Pv4) $Vd32 = vmem($Rx32++#$Ii)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_14560494, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_58a8bf, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001100;
@@ -28274,7 +28635,7 @@ def V6_vL32b_pred_pi_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii),
"if ($Pv4) $Vd32 = vmem($Rx32++#$Ii)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_15560488, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_58a8bf, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001100;
@@ -28293,7 +28654,7 @@ def V6_vL32b_pred_ppu : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2),
"if ($Pv4) $Vd32 = vmem($Rx32++$Mu2)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_f8c1c4, Requires<[HasV62T,UseHVX]> {
let Inst{10-5} = 0b000010;
let Inst{31-21} = 0b00101011100;
let isPredicated = 1;
@@ -28310,7 +28671,7 @@ def V6_vL32b_pred_ppu_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2),
"if ($Pv4) $Vd32 = vmem($Rx32++$Mu2)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_f8c1c4, Requires<[HasV62T,UseHVX]> {
let Inst{10-5} = 0b000010;
let Inst{31-21} = 0b00101011100;
let isPredicated = 1;
@@ -28328,7 +28689,7 @@ def V6_vL32b_tmp_ai : HInst<
(outs VectorRegs:$Vd32),
(ins IntRegs:$Rt32, s4_0Imm:$Ii),
"$Vd32.tmp = vmem($Rt32+#$Ii)",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_1244745, Requires<[HasV60T,UseHVX]> {
+tc_77a4c701, TypeCVI_VM_TMP_LD>, Enc_f3f408, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b00101000000;
@@ -28344,7 +28705,7 @@ def V6_vL32b_tmp_ai_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins IntRegs:$Rt32, s4_0Imm:$Ii),
"$Vd32.tmp = vmem($Rt32+#$Ii)",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_8437395, Requires<[HasV60T,UseHVX]> {
+tc_77a4c701, TypeCVI_VM_TMP_LD>, Enc_f3f408, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b00101000000;
@@ -28361,7 +28722,7 @@ def V6_vL32b_tmp_npred_ai : HInst<
(outs VectorRegs:$Vd32),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii),
"if (!$Pv4) $Vd32.tmp = vmem($Rt32+#$Ii)",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_13338314, Requires<[HasV62T,UseHVX]> {
+tc_51cd3aab, TypeCVI_VM_TMP_LD>, Enc_8d8a30, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{31-21} = 0b00101000100;
let isPredicated = 1;
@@ -28378,7 +28739,7 @@ def V6_vL32b_tmp_npred_ai_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii),
"if (!$Pv4) $Vd32.tmp = vmem($Rt32+#$Ii)",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_738356, Requires<[HasV62T,UseHVX]> {
+tc_51cd3aab, TypeCVI_VM_TMP_LD>, Enc_8d8a30, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{31-21} = 0b00101000100;
let isPredicated = 1;
@@ -28396,7 +28757,7 @@ def V6_vL32b_tmp_npred_pi : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii),
"if (!$Pv4) $Vd32.tmp = vmem($Rx32++#$Ii)",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_14560494, Requires<[HasV62T,UseHVX]> {
+tc_38208312, TypeCVI_VM_TMP_LD>, Enc_58a8bf, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001100;
@@ -28415,7 +28776,7 @@ def V6_vL32b_tmp_npred_pi_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii),
"if (!$Pv4) $Vd32.tmp = vmem($Rx32++#$Ii)",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_15560488, Requires<[HasV62T,UseHVX]> {
+tc_38208312, TypeCVI_VM_TMP_LD>, Enc_58a8bf, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001100;
@@ -28435,7 +28796,7 @@ def V6_vL32b_tmp_npred_ppu : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2),
"if (!$Pv4) $Vd32.tmp = vmem($Rx32++$Mu2)",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> {
+tc_38208312, TypeCVI_VM_TMP_LD>, Enc_f8c1c4, Requires<[HasV62T,UseHVX]> {
let Inst{10-5} = 0b000111;
let Inst{31-21} = 0b00101011100;
let isPredicated = 1;
@@ -28453,7 +28814,7 @@ def V6_vL32b_tmp_npred_ppu_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2),
"if (!$Pv4) $Vd32.tmp = vmem($Rx32++$Mu2)",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> {
+tc_38208312, TypeCVI_VM_TMP_LD>, Enc_f8c1c4, Requires<[HasV62T,UseHVX]> {
let Inst{10-5} = 0b000111;
let Inst{31-21} = 0b00101011100;
let isPredicated = 1;
@@ -28472,7 +28833,7 @@ def V6_vL32b_tmp_pi : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s3_0Imm:$Ii),
"$Vd32.tmp = vmem($Rx32++#$Ii)",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_10039393, Requires<[HasV60T,UseHVX]> {
+tc_9c267309, TypeCVI_VM_TMP_LD>, Enc_a255dc, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-11} = 0b000;
let Inst{31-21} = 0b00101001000;
@@ -28489,7 +28850,7 @@ def V6_vL32b_tmp_pi_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s3_0Imm:$Ii),
"$Vd32.tmp = vmem($Rx32++#$Ii)",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_11039423, Requires<[HasV60T,UseHVX]> {
+tc_9c267309, TypeCVI_VM_TMP_LD>, Enc_a255dc, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-11} = 0b000;
let Inst{31-21} = 0b00101001000;
@@ -28507,7 +28868,7 @@ def V6_vL32b_tmp_ppu : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Vd32.tmp = vmem($Rx32++$Mu2)",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_15949334, Requires<[HasV60T,UseHVX]> {
+tc_9c267309, TypeCVI_VM_TMP_LD>, Enc_2ebe3b, Requires<[HasV60T,UseHVX]> {
let Inst{12-5} = 0b00000010;
let Inst{31-21} = 0b00101011000;
let hasNewValue = 1;
@@ -28523,7 +28884,7 @@ def V6_vL32b_tmp_ppu_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Vd32.tmp = vmem($Rx32++$Mu2)",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_15949334, Requires<[HasV60T,UseHVX]> {
+tc_9c267309, TypeCVI_VM_TMP_LD>, Enc_2ebe3b, Requires<[HasV60T,UseHVX]> {
let Inst{12-5} = 0b00000010;
let Inst{31-21} = 0b00101011000;
let hasNewValue = 1;
@@ -28540,7 +28901,7 @@ def V6_vL32b_tmp_pred_ai : HInst<
(outs VectorRegs:$Vd32),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii),
"if ($Pv4) $Vd32.tmp = vmem($Rt32+#$Ii)",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_13338314, Requires<[HasV62T,UseHVX]> {
+tc_51cd3aab, TypeCVI_VM_TMP_LD>, Enc_8d8a30, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{31-21} = 0b00101000100;
let isPredicated = 1;
@@ -28556,7 +28917,7 @@ def V6_vL32b_tmp_pred_ai_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii),
"if ($Pv4) $Vd32.tmp = vmem($Rt32+#$Ii)",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_738356, Requires<[HasV62T,UseHVX]> {
+tc_51cd3aab, TypeCVI_VM_TMP_LD>, Enc_8d8a30, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{31-21} = 0b00101000100;
let isPredicated = 1;
@@ -28573,7 +28934,7 @@ def V6_vL32b_tmp_pred_pi : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii),
"if ($Pv4) $Vd32.tmp = vmem($Rx32++#$Ii)",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_14560494, Requires<[HasV62T,UseHVX]> {
+tc_38208312, TypeCVI_VM_TMP_LD>, Enc_58a8bf, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001100;
@@ -28591,7 +28952,7 @@ def V6_vL32b_tmp_pred_pi_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii),
"if ($Pv4) $Vd32.tmp = vmem($Rx32++#$Ii)",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_15560488, Requires<[HasV62T,UseHVX]> {
+tc_38208312, TypeCVI_VM_TMP_LD>, Enc_58a8bf, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001100;
@@ -28610,7 +28971,7 @@ def V6_vL32b_tmp_pred_ppu : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2),
"if ($Pv4) $Vd32.tmp = vmem($Rx32++$Mu2)",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> {
+tc_38208312, TypeCVI_VM_TMP_LD>, Enc_f8c1c4, Requires<[HasV62T,UseHVX]> {
let Inst{10-5} = 0b000110;
let Inst{31-21} = 0b00101011100;
let isPredicated = 1;
@@ -28627,7 +28988,7 @@ def V6_vL32b_tmp_pred_ppu_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2),
"if ($Pv4) $Vd32.tmp = vmem($Rx32++$Mu2)",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> {
+tc_38208312, TypeCVI_VM_TMP_LD>, Enc_f8c1c4, Requires<[HasV62T,UseHVX]> {
let Inst{10-5} = 0b000110;
let Inst{31-21} = 0b00101011100;
let isPredicated = 1;
@@ -28645,7 +29006,7 @@ def V6_vS32Ub_ai : HInst<
(outs),
(ins IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs:$Vs32),
"vmemu($Rt32+#$Ii) = $Vs32",
-CVI_VM_STU, TypeCVI_VM_STU>, Enc_6923828, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_354299ad, TypeCVI_VM_STU>, Enc_c9e3bc, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b111;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b00101000001;
@@ -28660,7 +29021,7 @@ def V6_vS32Ub_ai_128B : HInst<
(outs),
(ins IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs128B:$Vs32),
"vmemu($Rt32+#$Ii) = $Vs32",
-CVI_VM_STU, TypeCVI_VM_STU>, Enc_5757366, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_354299ad, TypeCVI_VM_STU>, Enc_c9e3bc, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b111;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b00101000001;
@@ -28676,7 +29037,7 @@ def V6_vS32Ub_npred_ai : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs:$Vs32),
"if (!$Pv4) vmemu($Rt32+#$Ii) = $Vs32",
-CVI_VM_STU, TypeCVI_VM_STU>, Enc_10075393, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_d642eff3, TypeCVI_VM_STU>, Enc_27b757, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b111;
let Inst{31-21} = 0b00101000101;
let isPredicated = 1;
@@ -28691,7 +29052,7 @@ def V6_vS32Ub_npred_ai_128B : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs128B:$Vs32),
"if (!$Pv4) vmemu($Rt32+#$Ii) = $Vs32",
-CVI_VM_STU, TypeCVI_VM_STU>, Enc_9470751, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_d642eff3, TypeCVI_VM_STU>, Enc_27b757, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b111;
let Inst{31-21} = 0b00101000101;
let isPredicated = 1;
@@ -28707,7 +29068,7 @@ def V6_vS32Ub_npred_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs:$Vs32),
"if (!$Pv4) vmemu($Rx32++#$Ii) = $Vs32",
-CVI_VM_STU, TypeCVI_VM_STU>, Enc_15459921, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_6fd9ad30, TypeCVI_VM_STU>, Enc_865390, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001101;
@@ -28724,7 +29085,7 @@ def V6_vS32Ub_npred_pi_128B : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs128B:$Vs32),
"if (!$Pv4) vmemu($Rx32++#$Ii) = $Vs32",
-CVI_VM_STU, TypeCVI_VM_STU>, Enc_14459927, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_6fd9ad30, TypeCVI_VM_STU>, Enc_865390, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001101;
@@ -28742,7 +29103,7 @@ def V6_vS32Ub_npred_ppu : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs:$Vs32),
"if (!$Pv4) vmemu($Rx32++$Mu2) = $Vs32",
-CVI_VM_STU, TypeCVI_VM_STU>, Enc_15733946, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_6fd9ad30, TypeCVI_VM_STU>, Enc_1ef990, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{10-5} = 0b000111;
let Inst{31-21} = 0b00101011101;
let isPredicated = 1;
@@ -28758,7 +29119,7 @@ def V6_vS32Ub_npred_ppu_128B : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs128B:$Vs32),
"if (!$Pv4) vmemu($Rx32++$Mu2) = $Vs32",
-CVI_VM_STU, TypeCVI_VM_STU>, Enc_15733946, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_6fd9ad30, TypeCVI_VM_STU>, Enc_1ef990, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{10-5} = 0b000111;
let Inst{31-21} = 0b00101011101;
let isPredicated = 1;
@@ -28775,7 +29136,7 @@ def V6_vS32Ub_pi : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs:$Vs32),
"vmemu($Rx32++#$Ii) = $Vs32",
-CVI_VM_STU, TypeCVI_VM_STU>, Enc_3296020, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_7fa82b08, TypeCVI_VM_STU>, Enc_b62ef7, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b111;
let Inst{13-11} = 0b000;
let Inst{31-21} = 0b00101001001;
@@ -28791,7 +29152,7 @@ def V6_vS32Ub_pi_128B : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs128B:$Vs32),
"vmemu($Rx32++#$Ii) = $Vs32",
-CVI_VM_STU, TypeCVI_VM_STU>, Enc_2296022, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_7fa82b08, TypeCVI_VM_STU>, Enc_b62ef7, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b111;
let Inst{13-11} = 0b000;
let Inst{31-21} = 0b00101001001;
@@ -28808,7 +29169,7 @@ def V6_vS32Ub_ppu : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs:$Vs32),
"vmemu($Rx32++$Mu2) = $Vs32",
-CVI_VM_STU, TypeCVI_VM_STU>, Enc_11281763, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_7fa82b08, TypeCVI_VM_STU>, Enc_d15d19, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{12-5} = 0b00000111;
let Inst{31-21} = 0b00101011001;
let addrMode = PostInc;
@@ -28823,7 +29184,7 @@ def V6_vS32Ub_ppu_128B : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs128B:$Vs32),
"vmemu($Rx32++$Mu2) = $Vs32",
-CVI_VM_STU, TypeCVI_VM_STU>, Enc_11281763, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_7fa82b08, TypeCVI_VM_STU>, Enc_d15d19, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{12-5} = 0b00000111;
let Inst{31-21} = 0b00101011001;
let addrMode = PostInc;
@@ -28839,7 +29200,7 @@ def V6_vS32Ub_pred_ai : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs:$Vs32),
"if ($Pv4) vmemu($Rt32+#$Ii) = $Vs32",
-CVI_VM_STU, TypeCVI_VM_STU>, Enc_10075393, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_d642eff3, TypeCVI_VM_STU>, Enc_27b757, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b110;
let Inst{31-21} = 0b00101000101;
let isPredicated = 1;
@@ -28853,7 +29214,7 @@ def V6_vS32Ub_pred_ai_128B : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs128B:$Vs32),
"if ($Pv4) vmemu($Rt32+#$Ii) = $Vs32",
-CVI_VM_STU, TypeCVI_VM_STU>, Enc_9470751, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_d642eff3, TypeCVI_VM_STU>, Enc_27b757, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b110;
let Inst{31-21} = 0b00101000101;
let isPredicated = 1;
@@ -28868,7 +29229,7 @@ def V6_vS32Ub_pred_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs:$Vs32),
"if ($Pv4) vmemu($Rx32++#$Ii) = $Vs32",
-CVI_VM_STU, TypeCVI_VM_STU>, Enc_15459921, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_6fd9ad30, TypeCVI_VM_STU>, Enc_865390, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001101;
@@ -28884,7 +29245,7 @@ def V6_vS32Ub_pred_pi_128B : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs128B:$Vs32),
"if ($Pv4) vmemu($Rx32++#$Ii) = $Vs32",
-CVI_VM_STU, TypeCVI_VM_STU>, Enc_14459927, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_6fd9ad30, TypeCVI_VM_STU>, Enc_865390, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001101;
@@ -28901,7 +29262,7 @@ def V6_vS32Ub_pred_ppu : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs:$Vs32),
"if ($Pv4) vmemu($Rx32++$Mu2) = $Vs32",
-CVI_VM_STU, TypeCVI_VM_STU>, Enc_15733946, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_6fd9ad30, TypeCVI_VM_STU>, Enc_1ef990, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{10-5} = 0b000110;
let Inst{31-21} = 0b00101011101;
let isPredicated = 1;
@@ -28916,7 +29277,7 @@ def V6_vS32Ub_pred_ppu_128B : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs128B:$Vs32),
"if ($Pv4) vmemu($Rx32++$Mu2) = $Vs32",
-CVI_VM_STU, TypeCVI_VM_STU>, Enc_15733946, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_6fd9ad30, TypeCVI_VM_STU>, Enc_1ef990, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{10-5} = 0b000110;
let Inst{31-21} = 0b00101011101;
let isPredicated = 1;
@@ -28932,7 +29293,7 @@ def V6_vS32b_ai : HInst<
(outs),
(ins IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs:$Vs32),
"vmem($Rt32+#$Ii) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_6923828, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_e3748cdf, TypeCVI_VM_ST>, Enc_c9e3bc, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b000;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b00101000001;
@@ -28948,7 +29309,7 @@ def V6_vS32b_ai_128B : HInst<
(outs),
(ins IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs128B:$Vs32),
"vmem($Rt32+#$Ii) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_5757366, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_e3748cdf, TypeCVI_VM_ST>, Enc_c9e3bc, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b000;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b00101000001;
@@ -28965,7 +29326,7 @@ def V6_vS32b_new_ai : HInst<
(outs),
(ins IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs:$Os8),
"vmem($Rt32+#$Ii) = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_6608821, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_1b93bdc6, TypeCVI_VM_NEW_ST>, Enc_f77fbc, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-3} = 0b00100;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b00101000001;
@@ -28984,7 +29345,7 @@ def V6_vS32b_new_ai_128B : HInst<
(outs),
(ins IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs128B:$Os8),
"vmem($Rt32+#$Ii) = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_2152247, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_1b93bdc6, TypeCVI_VM_NEW_ST>, Enc_f77fbc, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-3} = 0b00100;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b00101000001;
@@ -29004,7 +29365,7 @@ def V6_vS32b_new_npred_ai : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs:$Os8),
"if (!$Pv4) vmem($Rt32+#$Ii) = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_9372046, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_d5090f3e, TypeCVI_VM_NEW_ST>, Enc_f7430e, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-3} = 0b01101;
let Inst{31-21} = 0b00101000101;
let isPredicated = 1;
@@ -29023,7 +29384,7 @@ def V6_vS32b_new_npred_ai_128B : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs128B:$Os8),
"if (!$Pv4) vmem($Rt32+#$Ii) = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_13937564, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_d5090f3e, TypeCVI_VM_NEW_ST>, Enc_f7430e, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-3} = 0b01101;
let Inst{31-21} = 0b00101000101;
let isPredicated = 1;
@@ -29043,7 +29404,7 @@ def V6_vS32b_new_npred_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs:$Os8),
"if (!$Pv4) vmem($Rx32++#$Ii) = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_3735566, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_8b6a873f, TypeCVI_VM_NEW_ST>, Enc_784502, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-3} = 0b01101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001101;
@@ -29064,7 +29425,7 @@ def V6_vS32b_new_npred_pi_128B : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs128B:$Os8),
"if (!$Pv4) vmem($Rx32++#$Ii) = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_2735552, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_8b6a873f, TypeCVI_VM_NEW_ST>, Enc_784502, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-3} = 0b01101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001101;
@@ -29086,7 +29447,7 @@ def V6_vS32b_new_npred_ppu : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs:$Os8),
"if (!$Pv4) vmem($Rx32++$Mu2) = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_8498433, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_8b6a873f, TypeCVI_VM_NEW_ST>, Enc_372c9d, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{10-3} = 0b00001101;
let Inst{31-21} = 0b00101011101;
let isPredicated = 1;
@@ -29106,7 +29467,7 @@ def V6_vS32b_new_npred_ppu_128B : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs128B:$Os8),
"if (!$Pv4) vmem($Rx32++$Mu2) = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_8498433, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_8b6a873f, TypeCVI_VM_NEW_ST>, Enc_372c9d, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{10-3} = 0b00001101;
let Inst{31-21} = 0b00101011101;
let isPredicated = 1;
@@ -29127,7 +29488,7 @@ def V6_vS32b_new_pi : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs:$Os8),
"vmem($Rx32++#$Ii) = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_12244921, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_db5b9e2f, TypeCVI_VM_NEW_ST>, Enc_1aaec1, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-3} = 0b00100;
let Inst{13-11} = 0b000;
let Inst{31-21} = 0b00101001001;
@@ -29147,7 +29508,7 @@ def V6_vS32b_new_pi_128B : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs128B:$Os8),
"vmem($Rx32++#$Ii) = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_11244923, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_db5b9e2f, TypeCVI_VM_NEW_ST>, Enc_1aaec1, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-3} = 0b00100;
let Inst{13-11} = 0b000;
let Inst{31-21} = 0b00101001001;
@@ -29168,7 +29529,7 @@ def V6_vS32b_new_ppu : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs:$Os8),
"vmem($Rx32++$Mu2) = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_1589406, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_db5b9e2f, TypeCVI_VM_NEW_ST>, Enc_cf1927, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{12-3} = 0b0000000100;
let Inst{31-21} = 0b00101011001;
let addrMode = PostInc;
@@ -29187,7 +29548,7 @@ def V6_vS32b_new_ppu_128B : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs128B:$Os8),
"vmem($Rx32++$Mu2) = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_1589406, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_db5b9e2f, TypeCVI_VM_NEW_ST>, Enc_cf1927, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{12-3} = 0b0000000100;
let Inst{31-21} = 0b00101011001;
let addrMode = PostInc;
@@ -29207,7 +29568,7 @@ def V6_vS32b_new_pred_ai : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs:$Os8),
"if ($Pv4) vmem($Rt32+#$Ii) = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_9372046, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_d5090f3e, TypeCVI_VM_NEW_ST>, Enc_f7430e, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-3} = 0b01000;
let Inst{31-21} = 0b00101000101;
let isPredicated = 1;
@@ -29225,7 +29586,7 @@ def V6_vS32b_new_pred_ai_128B : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs128B:$Os8),
"if ($Pv4) vmem($Rt32+#$Ii) = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_13937564, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_d5090f3e, TypeCVI_VM_NEW_ST>, Enc_f7430e, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-3} = 0b01000;
let Inst{31-21} = 0b00101000101;
let isPredicated = 1;
@@ -29244,7 +29605,7 @@ def V6_vS32b_new_pred_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs:$Os8),
"if ($Pv4) vmem($Rx32++#$Ii) = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_3735566, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_8b6a873f, TypeCVI_VM_NEW_ST>, Enc_784502, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-3} = 0b01000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001101;
@@ -29264,7 +29625,7 @@ def V6_vS32b_new_pred_pi_128B : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs128B:$Os8),
"if ($Pv4) vmem($Rx32++#$Ii) = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_2735552, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_8b6a873f, TypeCVI_VM_NEW_ST>, Enc_784502, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-3} = 0b01000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001101;
@@ -29285,7 +29646,7 @@ def V6_vS32b_new_pred_ppu : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs:$Os8),
"if ($Pv4) vmem($Rx32++$Mu2) = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_8498433, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_8b6a873f, TypeCVI_VM_NEW_ST>, Enc_372c9d, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{10-3} = 0b00001000;
let Inst{31-21} = 0b00101011101;
let isPredicated = 1;
@@ -29304,7 +29665,7 @@ def V6_vS32b_new_pred_ppu_128B : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs128B:$Os8),
"if ($Pv4) vmem($Rx32++$Mu2) = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_8498433, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_8b6a873f, TypeCVI_VM_NEW_ST>, Enc_372c9d, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{10-3} = 0b00001000;
let Inst{31-21} = 0b00101011101;
let isPredicated = 1;
@@ -29324,7 +29685,7 @@ def V6_vS32b_npred_ai : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs:$Vs32),
"if (!$Pv4) vmem($Rt32+#$Ii) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_10075393, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_85d237e3, TypeCVI_VM_ST>, Enc_27b757, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b001;
let Inst{31-21} = 0b00101000101;
let isPredicated = 1;
@@ -29340,7 +29701,7 @@ def V6_vS32b_npred_ai_128B : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs128B:$Vs32),
"if (!$Pv4) vmem($Rt32+#$Ii) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_9470751, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_85d237e3, TypeCVI_VM_ST>, Enc_27b757, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b001;
let Inst{31-21} = 0b00101000101;
let isPredicated = 1;
@@ -29357,7 +29718,7 @@ def V6_vS32b_npred_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs:$Vs32),
"if (!$Pv4) vmem($Rx32++#$Ii) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_15459921, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_0317c6ca, TypeCVI_VM_ST>, Enc_865390, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001101;
@@ -29375,7 +29736,7 @@ def V6_vS32b_npred_pi_128B : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs128B:$Vs32),
"if (!$Pv4) vmem($Rx32++#$Ii) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_14459927, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_0317c6ca, TypeCVI_VM_ST>, Enc_865390, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001101;
@@ -29394,7 +29755,7 @@ def V6_vS32b_npred_ppu : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs:$Vs32),
"if (!$Pv4) vmem($Rx32++$Mu2) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_15733946, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_0317c6ca, TypeCVI_VM_ST>, Enc_1ef990, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{10-5} = 0b000001;
let Inst{31-21} = 0b00101011101;
let isPredicated = 1;
@@ -29411,7 +29772,7 @@ def V6_vS32b_npred_ppu_128B : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs128B:$Vs32),
"if (!$Pv4) vmem($Rx32++$Mu2) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_15733946, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_0317c6ca, TypeCVI_VM_ST>, Enc_1ef990, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{10-5} = 0b000001;
let Inst{31-21} = 0b00101011101;
let isPredicated = 1;
@@ -29429,7 +29790,7 @@ def V6_vS32b_nqpred_ai : HInst<
(outs),
(ins VecPredRegs:$Qv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs:$Vs32),
"if (!$Qv4) vmem($Rt32+#$Ii) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_16279406, Requires<[HasV60T,UseHVX]> {
+tc_aedb9f9e, TypeCVI_VM_ST>, Enc_2ea740, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{31-21} = 0b00101000100;
let addrMode = BaseImmOffset;
@@ -29441,7 +29802,7 @@ def V6_vS32b_nqpred_ai_128B : HInst<
(outs),
(ins VecPredRegs128B:$Qv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs128B:$Vs32),
"if (!$Qv4) vmem($Rt32+#$Ii) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_2703240, Requires<[HasV60T,UseHVX]> {
+tc_aedb9f9e, TypeCVI_VM_ST>, Enc_2ea740, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{31-21} = 0b00101000100;
let addrMode = BaseImmOffset;
@@ -29454,7 +29815,7 @@ def V6_vS32b_nqpred_pi : HInst<
(outs IntRegs:$Rx32),
(ins VecPredRegs:$Qv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs:$Vs32),
"if (!$Qv4) vmem($Rx32++#$Ii) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_12397062, Requires<[HasV60T,UseHVX]> {
+tc_99093773, TypeCVI_VM_ST>, Enc_0b51ce, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001100;
@@ -29468,7 +29829,7 @@ def V6_vS32b_nqpred_pi_128B : HInst<
(outs IntRegs:$Rx32),
(ins VecPredRegs128B:$Qv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs128B:$Vs32),
"if (!$Qv4) vmem($Rx32++#$Ii) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_13397056, Requires<[HasV60T,UseHVX]> {
+tc_99093773, TypeCVI_VM_ST>, Enc_0b51ce, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001100;
@@ -29483,7 +29844,7 @@ def V6_vS32b_nqpred_ppu : HInst<
(outs IntRegs:$Rx32),
(ins VecPredRegs:$Qv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs:$Vs32),
"if (!$Qv4) vmem($Rx32++$Mu2) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_13425035, Requires<[HasV60T,UseHVX]> {
+tc_99093773, TypeCVI_VM_ST>, Enc_4dff07, Requires<[HasV60T,UseHVX]> {
let Inst{10-5} = 0b000001;
let Inst{31-21} = 0b00101011100;
let addrMode = PostInc;
@@ -29496,7 +29857,7 @@ def V6_vS32b_nqpred_ppu_128B : HInst<
(outs IntRegs:$Rx32),
(ins VecPredRegs128B:$Qv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs128B:$Vs32),
"if (!$Qv4) vmem($Rx32++$Mu2) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_13425035, Requires<[HasV60T,UseHVX]> {
+tc_99093773, TypeCVI_VM_ST>, Enc_4dff07, Requires<[HasV60T,UseHVX]> {
let Inst{10-5} = 0b000001;
let Inst{31-21} = 0b00101011100;
let addrMode = PostInc;
@@ -29510,14 +29871,14 @@ def V6_vS32b_nt_ai : HInst<
(outs),
(ins IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs:$Vs32),
"vmem($Rt32+#$Ii):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_6923828, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_e3748cdf, TypeCVI_VM_ST>, Enc_c9e3bc, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b000;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b00101000011;
let addrMode = BaseImmOffset;
let accessSize = Vector64Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let BaseOpcode = "V6_vS32b_ai";
let isNVStorable = 1;
let isPredicable = 1;
@@ -29527,14 +29888,14 @@ def V6_vS32b_nt_ai_128B : HInst<
(outs),
(ins IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs128B:$Vs32),
"vmem($Rt32+#$Ii):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_5757366, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_e3748cdf, TypeCVI_VM_ST>, Enc_c9e3bc, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b000;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b00101000011;
let addrMode = BaseImmOffset;
let accessSize = Vector128Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let BaseOpcode = "V6_vS32b_ai_128B";
let isNVStorable = 1;
let isPredicable = 1;
@@ -29545,7 +29906,7 @@ def V6_vS32b_nt_new_ai : HInst<
(outs),
(ins IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs:$Os8),
"vmem($Rt32+#$Ii):nt = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_6608821, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_1b93bdc6, TypeCVI_VM_NEW_ST>, Enc_f77fbc, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-3} = 0b00100;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b00101000011;
@@ -29565,7 +29926,7 @@ def V6_vS32b_nt_new_ai_128B : HInst<
(outs),
(ins IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs128B:$Os8),
"vmem($Rt32+#$Ii):nt = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_2152247, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_1b93bdc6, TypeCVI_VM_NEW_ST>, Enc_f77fbc, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-3} = 0b00100;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b00101000011;
@@ -29586,7 +29947,7 @@ def V6_vS32b_nt_new_npred_ai : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs:$Os8),
"if (!$Pv4) vmem($Rt32+#$Ii):nt = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_9372046, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_d5090f3e, TypeCVI_VM_NEW_ST>, Enc_f7430e, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-3} = 0b01111;
let Inst{31-21} = 0b00101000111;
let isPredicated = 1;
@@ -29606,7 +29967,7 @@ def V6_vS32b_nt_new_npred_ai_128B : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs128B:$Os8),
"if (!$Pv4) vmem($Rt32+#$Ii):nt = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_13937564, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_d5090f3e, TypeCVI_VM_NEW_ST>, Enc_f7430e, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-3} = 0b01111;
let Inst{31-21} = 0b00101000111;
let isPredicated = 1;
@@ -29627,7 +29988,7 @@ def V6_vS32b_nt_new_npred_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs:$Os8),
"if (!$Pv4) vmem($Rx32++#$Ii):nt = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_3735566, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_8b6a873f, TypeCVI_VM_NEW_ST>, Enc_784502, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-3} = 0b01111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001111;
@@ -29649,7 +30010,7 @@ def V6_vS32b_nt_new_npred_pi_128B : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs128B:$Os8),
"if (!$Pv4) vmem($Rx32++#$Ii):nt = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_2735552, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_8b6a873f, TypeCVI_VM_NEW_ST>, Enc_784502, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-3} = 0b01111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001111;
@@ -29672,7 +30033,7 @@ def V6_vS32b_nt_new_npred_ppu : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs:$Os8),
"if (!$Pv4) vmem($Rx32++$Mu2):nt = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_8498433, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_8b6a873f, TypeCVI_VM_NEW_ST>, Enc_372c9d, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{10-3} = 0b00001111;
let Inst{31-21} = 0b00101011111;
let isPredicated = 1;
@@ -29693,7 +30054,7 @@ def V6_vS32b_nt_new_npred_ppu_128B : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs128B:$Os8),
"if (!$Pv4) vmem($Rx32++$Mu2):nt = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_8498433, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_8b6a873f, TypeCVI_VM_NEW_ST>, Enc_372c9d, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{10-3} = 0b00001111;
let Inst{31-21} = 0b00101011111;
let isPredicated = 1;
@@ -29715,7 +30076,7 @@ def V6_vS32b_nt_new_pi : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs:$Os8),
"vmem($Rx32++#$Ii):nt = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_12244921, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_db5b9e2f, TypeCVI_VM_NEW_ST>, Enc_1aaec1, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-3} = 0b00100;
let Inst{13-11} = 0b000;
let Inst{31-21} = 0b00101001011;
@@ -29736,7 +30097,7 @@ def V6_vS32b_nt_new_pi_128B : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs128B:$Os8),
"vmem($Rx32++#$Ii):nt = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_11244923, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_db5b9e2f, TypeCVI_VM_NEW_ST>, Enc_1aaec1, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-3} = 0b00100;
let Inst{13-11} = 0b000;
let Inst{31-21} = 0b00101001011;
@@ -29758,7 +30119,7 @@ def V6_vS32b_nt_new_ppu : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs:$Os8),
"vmem($Rx32++$Mu2):nt = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_1589406, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_db5b9e2f, TypeCVI_VM_NEW_ST>, Enc_cf1927, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{12-3} = 0b0000000100;
let Inst{31-21} = 0b00101011011;
let addrMode = PostInc;
@@ -29778,7 +30139,7 @@ def V6_vS32b_nt_new_ppu_128B : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs128B:$Os8),
"vmem($Rx32++$Mu2):nt = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_1589406, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_db5b9e2f, TypeCVI_VM_NEW_ST>, Enc_cf1927, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{12-3} = 0b0000000100;
let Inst{31-21} = 0b00101011011;
let addrMode = PostInc;
@@ -29799,7 +30160,7 @@ def V6_vS32b_nt_new_pred_ai : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs:$Os8),
"if ($Pv4) vmem($Rt32+#$Ii):nt = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_9372046, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_d5090f3e, TypeCVI_VM_NEW_ST>, Enc_f7430e, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-3} = 0b01010;
let Inst{31-21} = 0b00101000111;
let isPredicated = 1;
@@ -29818,7 +30179,7 @@ def V6_vS32b_nt_new_pred_ai_128B : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs128B:$Os8),
"if ($Pv4) vmem($Rt32+#$Ii):nt = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_13937564, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_d5090f3e, TypeCVI_VM_NEW_ST>, Enc_f7430e, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-3} = 0b01010;
let Inst{31-21} = 0b00101000111;
let isPredicated = 1;
@@ -29838,7 +30199,7 @@ def V6_vS32b_nt_new_pred_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs:$Os8),
"if ($Pv4) vmem($Rx32++#$Ii):nt = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_3735566, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_8b6a873f, TypeCVI_VM_NEW_ST>, Enc_784502, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-3} = 0b01010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001111;
@@ -29859,7 +30220,7 @@ def V6_vS32b_nt_new_pred_pi_128B : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs128B:$Os8),
"if ($Pv4) vmem($Rx32++#$Ii):nt = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_2735552, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_8b6a873f, TypeCVI_VM_NEW_ST>, Enc_784502, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-3} = 0b01010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001111;
@@ -29881,7 +30242,7 @@ def V6_vS32b_nt_new_pred_ppu : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs:$Os8),
"if ($Pv4) vmem($Rx32++$Mu2):nt = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_8498433, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_8b6a873f, TypeCVI_VM_NEW_ST>, Enc_372c9d, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{10-3} = 0b00001010;
let Inst{31-21} = 0b00101011111;
let isPredicated = 1;
@@ -29901,7 +30262,7 @@ def V6_vS32b_nt_new_pred_ppu_128B : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs128B:$Os8),
"if ($Pv4) vmem($Rx32++$Mu2):nt = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_8498433, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_8b6a873f, TypeCVI_VM_NEW_ST>, Enc_372c9d, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{10-3} = 0b00001010;
let Inst{31-21} = 0b00101011111;
let isPredicated = 1;
@@ -29922,15 +30283,15 @@ def V6_vS32b_nt_npred_ai : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs:$Vs32),
"if (!$Pv4) vmem($Rt32+#$Ii):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_10075393, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_85d237e3, TypeCVI_VM_ST>, Enc_27b757, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b001;
let Inst{31-21} = 0b00101000111;
let isPredicated = 1;
let isPredicatedFalse = 1;
let addrMode = BaseImmOffset;
let accessSize = Vector64Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let BaseOpcode = "V6_vS32b_ai";
let isNVStorable = 1;
let DecoderNamespace = "EXT_mmvec";
@@ -29939,15 +30300,15 @@ def V6_vS32b_nt_npred_ai_128B : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs128B:$Vs32),
"if (!$Pv4) vmem($Rt32+#$Ii):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_9470751, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_85d237e3, TypeCVI_VM_ST>, Enc_27b757, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b001;
let Inst{31-21} = 0b00101000111;
let isPredicated = 1;
let isPredicatedFalse = 1;
let addrMode = BaseImmOffset;
let accessSize = Vector128Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let BaseOpcode = "V6_vS32b_ai_128B";
let isNVStorable = 1;
let DecoderNamespace = "EXT_mmvec";
@@ -29957,7 +30318,7 @@ def V6_vS32b_nt_npred_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs:$Vs32),
"if (!$Pv4) vmem($Rx32++#$Ii):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_15459921, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_0317c6ca, TypeCVI_VM_ST>, Enc_865390, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001111;
@@ -29965,8 +30326,8 @@ let isPredicated = 1;
let isPredicatedFalse = 1;
let addrMode = PostInc;
let accessSize = Vector64Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let BaseOpcode = "V6_vS32b_pi";
let isNVStorable = 1;
let DecoderNamespace = "EXT_mmvec";
@@ -29976,7 +30337,7 @@ def V6_vS32b_nt_npred_pi_128B : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs128B:$Vs32),
"if (!$Pv4) vmem($Rx32++#$Ii):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_14459927, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_0317c6ca, TypeCVI_VM_ST>, Enc_865390, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001111;
@@ -29984,8 +30345,8 @@ let isPredicated = 1;
let isPredicatedFalse = 1;
let addrMode = PostInc;
let accessSize = Vector128Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let BaseOpcode = "V6_vS32b_pi_128B";
let isNVStorable = 1;
let DecoderNamespace = "EXT_mmvec";
@@ -29996,15 +30357,15 @@ def V6_vS32b_nt_npred_ppu : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs:$Vs32),
"if (!$Pv4) vmem($Rx32++$Mu2):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_15733946, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_0317c6ca, TypeCVI_VM_ST>, Enc_1ef990, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{10-5} = 0b000001;
let Inst{31-21} = 0b00101011111;
let isPredicated = 1;
let isPredicatedFalse = 1;
let addrMode = PostInc;
let accessSize = Vector64Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let BaseOpcode = "V6_vS32b_ppu";
let isNVStorable = 1;
let DecoderNamespace = "EXT_mmvec";
@@ -30014,15 +30375,15 @@ def V6_vS32b_nt_npred_ppu_128B : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs128B:$Vs32),
"if (!$Pv4) vmem($Rx32++$Mu2):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_15733946, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_0317c6ca, TypeCVI_VM_ST>, Enc_1ef990, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{10-5} = 0b000001;
let Inst{31-21} = 0b00101011111;
let isPredicated = 1;
let isPredicatedFalse = 1;
let addrMode = PostInc;
let accessSize = Vector128Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let BaseOpcode = "V6_vS32b_ppu_128B";
let isNVStorable = 1;
let DecoderNamespace = "EXT_mmvec";
@@ -30033,26 +30394,26 @@ def V6_vS32b_nt_nqpred_ai : HInst<
(outs),
(ins VecPredRegs:$Qv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs:$Vs32),
"if (!$Qv4) vmem($Rt32+#$Ii):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_16279406, Requires<[HasV60T,UseHVX]> {
+tc_aedb9f9e, TypeCVI_VM_ST>, Enc_2ea740, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{31-21} = 0b00101000110;
let addrMode = BaseImmOffset;
let accessSize = Vector64Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let DecoderNamespace = "EXT_mmvec";
}
def V6_vS32b_nt_nqpred_ai_128B : HInst<
(outs),
(ins VecPredRegs128B:$Qv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs128B:$Vs32),
"if (!$Qv4) vmem($Rt32+#$Ii):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_2703240, Requires<[HasV60T,UseHVX]> {
+tc_aedb9f9e, TypeCVI_VM_ST>, Enc_2ea740, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{31-21} = 0b00101000110;
let addrMode = BaseImmOffset;
let accessSize = Vector128Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let DecoderNamespace = "EXT_mmvec";
let isCodeGenOnly = 1;
}
@@ -30060,14 +30421,14 @@ def V6_vS32b_nt_nqpred_pi : HInst<
(outs IntRegs:$Rx32),
(ins VecPredRegs:$Qv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs:$Vs32),
"if (!$Qv4) vmem($Rx32++#$Ii):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_12397062, Requires<[HasV60T,UseHVX]> {
+tc_99093773, TypeCVI_VM_ST>, Enc_0b51ce, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001110;
let addrMode = PostInc;
let accessSize = Vector64Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let DecoderNamespace = "EXT_mmvec";
let Constraints = "$Rx32 = $Rx32in";
}
@@ -30075,14 +30436,14 @@ def V6_vS32b_nt_nqpred_pi_128B : HInst<
(outs IntRegs:$Rx32),
(ins VecPredRegs128B:$Qv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs128B:$Vs32),
"if (!$Qv4) vmem($Rx32++#$Ii):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_13397056, Requires<[HasV60T,UseHVX]> {
+tc_99093773, TypeCVI_VM_ST>, Enc_0b51ce, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001110;
let addrMode = PostInc;
let accessSize = Vector128Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let DecoderNamespace = "EXT_mmvec";
let isCodeGenOnly = 1;
let Constraints = "$Rx32 = $Rx32in";
@@ -30091,13 +30452,13 @@ def V6_vS32b_nt_nqpred_ppu : HInst<
(outs IntRegs:$Rx32),
(ins VecPredRegs:$Qv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs:$Vs32),
"if (!$Qv4) vmem($Rx32++$Mu2):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_13425035, Requires<[HasV60T,UseHVX]> {
+tc_99093773, TypeCVI_VM_ST>, Enc_4dff07, Requires<[HasV60T,UseHVX]> {
let Inst{10-5} = 0b000001;
let Inst{31-21} = 0b00101011110;
let addrMode = PostInc;
let accessSize = Vector64Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let DecoderNamespace = "EXT_mmvec";
let Constraints = "$Rx32 = $Rx32in";
}
@@ -30105,13 +30466,13 @@ def V6_vS32b_nt_nqpred_ppu_128B : HInst<
(outs IntRegs:$Rx32),
(ins VecPredRegs128B:$Qv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs128B:$Vs32),
"if (!$Qv4) vmem($Rx32++$Mu2):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_13425035, Requires<[HasV60T,UseHVX]> {
+tc_99093773, TypeCVI_VM_ST>, Enc_4dff07, Requires<[HasV60T,UseHVX]> {
let Inst{10-5} = 0b000001;
let Inst{31-21} = 0b00101011110;
let addrMode = PostInc;
let accessSize = Vector128Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let DecoderNamespace = "EXT_mmvec";
let isCodeGenOnly = 1;
let Constraints = "$Rx32 = $Rx32in";
@@ -30120,14 +30481,14 @@ def V6_vS32b_nt_pi : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs:$Vs32),
"vmem($Rx32++#$Ii):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_3296020, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_a4c9df3b, TypeCVI_VM_ST>, Enc_b62ef7, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b000;
let Inst{13-11} = 0b000;
let Inst{31-21} = 0b00101001011;
let addrMode = PostInc;
let accessSize = Vector64Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let BaseOpcode = "V6_vS32b_pi";
let isNVStorable = 1;
let isPredicable = 1;
@@ -30138,14 +30499,14 @@ def V6_vS32b_nt_pi_128B : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs128B:$Vs32),
"vmem($Rx32++#$Ii):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_2296022, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_a4c9df3b, TypeCVI_VM_ST>, Enc_b62ef7, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b000;
let Inst{13-11} = 0b000;
let Inst{31-21} = 0b00101001011;
let addrMode = PostInc;
let accessSize = Vector128Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let BaseOpcode = "V6_vS32b_pi_128B";
let isNVStorable = 1;
let isPredicable = 1;
@@ -30157,13 +30518,13 @@ def V6_vS32b_nt_ppu : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs:$Vs32),
"vmem($Rx32++$Mu2):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_11281763, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_a4c9df3b, TypeCVI_VM_ST>, Enc_d15d19, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b00101011011;
let addrMode = PostInc;
let accessSize = Vector64Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let BaseOpcode = "V6_vS32b_ppu";
let isNVStorable = 1;
let isPredicable = 1;
@@ -30174,13 +30535,13 @@ def V6_vS32b_nt_ppu_128B : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs128B:$Vs32),
"vmem($Rx32++$Mu2):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_11281763, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_a4c9df3b, TypeCVI_VM_ST>, Enc_d15d19, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b00101011011;
let addrMode = PostInc;
let accessSize = Vector128Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let BaseOpcode = "V6_vS32b_ppu_128B";
let isNVStorable = 1;
let isPredicable = 1;
@@ -30192,14 +30553,14 @@ def V6_vS32b_nt_pred_ai : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs:$Vs32),
"if ($Pv4) vmem($Rt32+#$Ii):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_10075393, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_85d237e3, TypeCVI_VM_ST>, Enc_27b757, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b000;
let Inst{31-21} = 0b00101000111;
let isPredicated = 1;
let addrMode = BaseImmOffset;
let accessSize = Vector64Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let BaseOpcode = "V6_vS32b_ai";
let isNVStorable = 1;
let DecoderNamespace = "EXT_mmvec";
@@ -30208,14 +30569,14 @@ def V6_vS32b_nt_pred_ai_128B : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs128B:$Vs32),
"if ($Pv4) vmem($Rt32+#$Ii):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_9470751, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_85d237e3, TypeCVI_VM_ST>, Enc_27b757, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b000;
let Inst{31-21} = 0b00101000111;
let isPredicated = 1;
let addrMode = BaseImmOffset;
let accessSize = Vector128Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let BaseOpcode = "V6_vS32b_ai_128B";
let isNVStorable = 1;
let DecoderNamespace = "EXT_mmvec";
@@ -30225,15 +30586,15 @@ def V6_vS32b_nt_pred_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs:$Vs32),
"if ($Pv4) vmem($Rx32++#$Ii):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_15459921, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_0317c6ca, TypeCVI_VM_ST>, Enc_865390, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001111;
let isPredicated = 1;
let addrMode = PostInc;
let accessSize = Vector64Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let BaseOpcode = "V6_vS32b_pi";
let isNVStorable = 1;
let DecoderNamespace = "EXT_mmvec";
@@ -30243,15 +30604,15 @@ def V6_vS32b_nt_pred_pi_128B : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs128B:$Vs32),
"if ($Pv4) vmem($Rx32++#$Ii):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_14459927, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_0317c6ca, TypeCVI_VM_ST>, Enc_865390, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001111;
let isPredicated = 1;
let addrMode = PostInc;
let accessSize = Vector128Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let BaseOpcode = "V6_vS32b_pi_128B";
let isNVStorable = 1;
let DecoderNamespace = "EXT_mmvec";
@@ -30262,14 +30623,14 @@ def V6_vS32b_nt_pred_ppu : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs:$Vs32),
"if ($Pv4) vmem($Rx32++$Mu2):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_15733946, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_0317c6ca, TypeCVI_VM_ST>, Enc_1ef990, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{10-5} = 0b000000;
let Inst{31-21} = 0b00101011111;
let isPredicated = 1;
let addrMode = PostInc;
let accessSize = Vector64Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let BaseOpcode = "V6_vS32b_ppu";
let isNVStorable = 1;
let DecoderNamespace = "EXT_mmvec";
@@ -30279,14 +30640,14 @@ def V6_vS32b_nt_pred_ppu_128B : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs128B:$Vs32),
"if ($Pv4) vmem($Rx32++$Mu2):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_15733946, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_0317c6ca, TypeCVI_VM_ST>, Enc_1ef990, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{10-5} = 0b000000;
let Inst{31-21} = 0b00101011111;
let isPredicated = 1;
let addrMode = PostInc;
let accessSize = Vector128Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let BaseOpcode = "V6_vS32b_ppu_128B";
let isNVStorable = 1;
let DecoderNamespace = "EXT_mmvec";
@@ -30297,26 +30658,26 @@ def V6_vS32b_nt_qpred_ai : HInst<
(outs),
(ins VecPredRegs:$Qv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs:$Vs32),
"if ($Qv4) vmem($Rt32+#$Ii):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_16279406, Requires<[HasV60T,UseHVX]> {
+tc_aedb9f9e, TypeCVI_VM_ST>, Enc_2ea740, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{31-21} = 0b00101000110;
let addrMode = BaseImmOffset;
let accessSize = Vector64Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let DecoderNamespace = "EXT_mmvec";
}
def V6_vS32b_nt_qpred_ai_128B : HInst<
(outs),
(ins VecPredRegs128B:$Qv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs128B:$Vs32),
"if ($Qv4) vmem($Rt32+#$Ii):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_2703240, Requires<[HasV60T,UseHVX]> {
+tc_aedb9f9e, TypeCVI_VM_ST>, Enc_2ea740, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{31-21} = 0b00101000110;
let addrMode = BaseImmOffset;
let accessSize = Vector128Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let DecoderNamespace = "EXT_mmvec";
let isCodeGenOnly = 1;
}
@@ -30324,14 +30685,14 @@ def V6_vS32b_nt_qpred_pi : HInst<
(outs IntRegs:$Rx32),
(ins VecPredRegs:$Qv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs:$Vs32),
"if ($Qv4) vmem($Rx32++#$Ii):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_12397062, Requires<[HasV60T,UseHVX]> {
+tc_99093773, TypeCVI_VM_ST>, Enc_0b51ce, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001110;
let addrMode = PostInc;
let accessSize = Vector64Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let DecoderNamespace = "EXT_mmvec";
let Constraints = "$Rx32 = $Rx32in";
}
@@ -30339,14 +30700,14 @@ def V6_vS32b_nt_qpred_pi_128B : HInst<
(outs IntRegs:$Rx32),
(ins VecPredRegs128B:$Qv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs128B:$Vs32),
"if ($Qv4) vmem($Rx32++#$Ii):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_13397056, Requires<[HasV60T,UseHVX]> {
+tc_99093773, TypeCVI_VM_ST>, Enc_0b51ce, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001110;
let addrMode = PostInc;
let accessSize = Vector128Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let DecoderNamespace = "EXT_mmvec";
let isCodeGenOnly = 1;
let Constraints = "$Rx32 = $Rx32in";
@@ -30355,13 +30716,13 @@ def V6_vS32b_nt_qpred_ppu : HInst<
(outs IntRegs:$Rx32),
(ins VecPredRegs:$Qv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs:$Vs32),
"if ($Qv4) vmem($Rx32++$Mu2):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_13425035, Requires<[HasV60T,UseHVX]> {
+tc_99093773, TypeCVI_VM_ST>, Enc_4dff07, Requires<[HasV60T,UseHVX]> {
let Inst{10-5} = 0b000000;
let Inst{31-21} = 0b00101011110;
let addrMode = PostInc;
let accessSize = Vector64Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let DecoderNamespace = "EXT_mmvec";
let Constraints = "$Rx32 = $Rx32in";
}
@@ -30369,13 +30730,13 @@ def V6_vS32b_nt_qpred_ppu_128B : HInst<
(outs IntRegs:$Rx32),
(ins VecPredRegs128B:$Qv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs128B:$Vs32),
"if ($Qv4) vmem($Rx32++$Mu2):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_13425035, Requires<[HasV60T,UseHVX]> {
+tc_99093773, TypeCVI_VM_ST>, Enc_4dff07, Requires<[HasV60T,UseHVX]> {
let Inst{10-5} = 0b000000;
let Inst{31-21} = 0b00101011110;
let addrMode = PostInc;
let accessSize = Vector128Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let DecoderNamespace = "EXT_mmvec";
let isCodeGenOnly = 1;
let Constraints = "$Rx32 = $Rx32in";
@@ -30384,7 +30745,7 @@ def V6_vS32b_pi : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs:$Vs32),
"vmem($Rx32++#$Ii) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_3296020, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_a4c9df3b, TypeCVI_VM_ST>, Enc_b62ef7, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b000;
let Inst{13-11} = 0b000;
let Inst{31-21} = 0b00101001001;
@@ -30401,7 +30762,7 @@ def V6_vS32b_pi_128B : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs128B:$Vs32),
"vmem($Rx32++#$Ii) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_2296022, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_a4c9df3b, TypeCVI_VM_ST>, Enc_b62ef7, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b000;
let Inst{13-11} = 0b000;
let Inst{31-21} = 0b00101001001;
@@ -30419,7 +30780,7 @@ def V6_vS32b_ppu : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs:$Vs32),
"vmem($Rx32++$Mu2) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_11281763, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_a4c9df3b, TypeCVI_VM_ST>, Enc_d15d19, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b00101011001;
let addrMode = PostInc;
@@ -30434,7 +30795,7 @@ def V6_vS32b_ppu_128B : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs128B:$Vs32),
"vmem($Rx32++$Mu2) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_11281763, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_a4c9df3b, TypeCVI_VM_ST>, Enc_d15d19, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b00101011001;
let addrMode = PostInc;
@@ -30450,7 +30811,7 @@ def V6_vS32b_pred_ai : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs:$Vs32),
"if ($Pv4) vmem($Rt32+#$Ii) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_10075393, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_85d237e3, TypeCVI_VM_ST>, Enc_27b757, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b000;
let Inst{31-21} = 0b00101000101;
let isPredicated = 1;
@@ -30465,7 +30826,7 @@ def V6_vS32b_pred_ai_128B : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs128B:$Vs32),
"if ($Pv4) vmem($Rt32+#$Ii) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_9470751, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_85d237e3, TypeCVI_VM_ST>, Enc_27b757, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b000;
let Inst{31-21} = 0b00101000101;
let isPredicated = 1;
@@ -30481,7 +30842,7 @@ def V6_vS32b_pred_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs:$Vs32),
"if ($Pv4) vmem($Rx32++#$Ii) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_15459921, Requires<[HasV60T,UseHVX]> {
+tc_0317c6ca, TypeCVI_VM_ST>, Enc_865390, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001101;
@@ -30498,7 +30859,7 @@ def V6_vS32b_pred_pi_128B : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs128B:$Vs32),
"if ($Pv4) vmem($Rx32++#$Ii) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_14459927, Requires<[HasV60T,UseHVX]> {
+tc_0317c6ca, TypeCVI_VM_ST>, Enc_865390, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001101;
@@ -30516,7 +30877,7 @@ def V6_vS32b_pred_ppu : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs:$Vs32),
"if ($Pv4) vmem($Rx32++$Mu2) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_15733946, Requires<[HasV60T,UseHVX]> {
+tc_0317c6ca, TypeCVI_VM_ST>, Enc_1ef990, Requires<[HasV60T,UseHVX]> {
let Inst{10-5} = 0b000000;
let Inst{31-21} = 0b00101011101;
let isPredicated = 1;
@@ -30531,7 +30892,7 @@ def V6_vS32b_pred_ppu_128B : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs128B:$Vs32),
"if ($Pv4) vmem($Rx32++$Mu2) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_15733946, Requires<[HasV60T,UseHVX]> {
+tc_0317c6ca, TypeCVI_VM_ST>, Enc_1ef990, Requires<[HasV60T,UseHVX]> {
let Inst{10-5} = 0b000000;
let Inst{31-21} = 0b00101011101;
let isPredicated = 1;
@@ -30547,7 +30908,7 @@ def V6_vS32b_qpred_ai : HInst<
(outs),
(ins VecPredRegs:$Qv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs:$Vs32),
"if ($Qv4) vmem($Rt32+#$Ii) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_16279406, Requires<[HasV60T,UseHVX]> {
+tc_aedb9f9e, TypeCVI_VM_ST>, Enc_2ea740, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{31-21} = 0b00101000100;
let addrMode = BaseImmOffset;
@@ -30559,7 +30920,7 @@ def V6_vS32b_qpred_ai_128B : HInst<
(outs),
(ins VecPredRegs128B:$Qv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs128B:$Vs32),
"if ($Qv4) vmem($Rt32+#$Ii) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_2703240, Requires<[HasV60T,UseHVX]> {
+tc_aedb9f9e, TypeCVI_VM_ST>, Enc_2ea740, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{31-21} = 0b00101000100;
let addrMode = BaseImmOffset;
@@ -30572,7 +30933,7 @@ def V6_vS32b_qpred_pi : HInst<
(outs IntRegs:$Rx32),
(ins VecPredRegs:$Qv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs:$Vs32),
"if ($Qv4) vmem($Rx32++#$Ii) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_12397062, Requires<[HasV60T,UseHVX]> {
+tc_99093773, TypeCVI_VM_ST>, Enc_0b51ce, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001100;
@@ -30586,7 +30947,7 @@ def V6_vS32b_qpred_pi_128B : HInst<
(outs IntRegs:$Rx32),
(ins VecPredRegs128B:$Qv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs128B:$Vs32),
"if ($Qv4) vmem($Rx32++#$Ii) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_13397056, Requires<[HasV60T,UseHVX]> {
+tc_99093773, TypeCVI_VM_ST>, Enc_0b51ce, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001100;
@@ -30601,7 +30962,7 @@ def V6_vS32b_qpred_ppu : HInst<
(outs IntRegs:$Rx32),
(ins VecPredRegs:$Qv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs:$Vs32),
"if ($Qv4) vmem($Rx32++$Mu2) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_13425035, Requires<[HasV60T,UseHVX]> {
+tc_99093773, TypeCVI_VM_ST>, Enc_4dff07, Requires<[HasV60T,UseHVX]> {
let Inst{10-5} = 0b000000;
let Inst{31-21} = 0b00101011100;
let addrMode = PostInc;
@@ -30614,7 +30975,7 @@ def V6_vS32b_qpred_ppu_128B : HInst<
(outs IntRegs:$Rx32),
(ins VecPredRegs128B:$Qv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs128B:$Vs32),
"if ($Qv4) vmem($Rx32++$Mu2) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_13425035, Requires<[HasV60T,UseHVX]> {
+tc_99093773, TypeCVI_VM_ST>, Enc_4dff07, Requires<[HasV60T,UseHVX]> {
let Inst{10-5} = 0b000000;
let Inst{31-21} = 0b00101011100;
let addrMode = PostInc;
@@ -30628,7 +30989,7 @@ def V6_vabsdiffh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.uh = vabsdiff($Vu32.h,$Vv32.h)",
-CVI_VX, TypeCVI_VX>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_908a4c8c, TypeCVI_VX>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100110;
@@ -30640,7 +31001,7 @@ def V6_vabsdiffh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.uh = vabsdiff($Vu32.h,$Vv32.h)",
-CVI_VX, TypeCVI_VX>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_908a4c8c, TypeCVI_VX>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100110;
@@ -30676,7 +31037,7 @@ def V6_vabsdiffub : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.ub = vabsdiff($Vu32.ub,$Vv32.ub)",
-CVI_VX, TypeCVI_VX>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_908a4c8c, TypeCVI_VX>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100110;
@@ -30688,7 +31049,7 @@ def V6_vabsdiffub_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.ub = vabsdiff($Vu32.ub,$Vv32.ub)",
-CVI_VX, TypeCVI_VX>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_908a4c8c, TypeCVI_VX>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100110;
@@ -30724,7 +31085,7 @@ def V6_vabsdiffuh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.uh = vabsdiff($Vu32.uh,$Vv32.uh)",
-CVI_VX, TypeCVI_VX>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_908a4c8c, TypeCVI_VX>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100110;
@@ -30736,7 +31097,7 @@ def V6_vabsdiffuh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.uh = vabsdiff($Vu32.uh,$Vv32.uh)",
-CVI_VX, TypeCVI_VX>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_908a4c8c, TypeCVI_VX>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100110;
@@ -30772,7 +31133,7 @@ def V6_vabsdiffw : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.uw = vabsdiff($Vu32.w,$Vv32.w)",
-CVI_VX, TypeCVI_VX>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_908a4c8c, TypeCVI_VX>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100110;
@@ -30784,7 +31145,7 @@ def V6_vabsdiffw_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.uw = vabsdiff($Vu32.w,$Vv32.w)",
-CVI_VX, TypeCVI_VX>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_908a4c8c, TypeCVI_VX>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100110;
@@ -30820,7 +31181,7 @@ def V6_vabsh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32),
"$Vd32.h = vabs($Vu32.h)",
-CVI_VA, TypeCVI_VA>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_71337255, TypeCVI_VA>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000000;
@@ -30832,7 +31193,7 @@ def V6_vabsh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32),
"$Vd32.h = vabs($Vu32.h)",
-CVI_VA, TypeCVI_VA>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_71337255, TypeCVI_VA>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000000;
@@ -30868,7 +31229,7 @@ def V6_vabsh_sat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32),
"$Vd32.h = vabs($Vu32.h):sat",
-CVI_VA, TypeCVI_VA>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_71337255, TypeCVI_VA>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000000;
@@ -30880,7 +31241,7 @@ def V6_vabsh_sat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32),
"$Vd32.h = vabs($Vu32.h):sat",
-CVI_VA, TypeCVI_VA>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_71337255, TypeCVI_VA>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000000;
@@ -30916,7 +31277,7 @@ def V6_vabsw : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32),
"$Vd32.w = vabs($Vu32.w)",
-CVI_VA, TypeCVI_VA>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_71337255, TypeCVI_VA>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000000;
@@ -30928,7 +31289,7 @@ def V6_vabsw_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32),
"$Vd32.w = vabs($Vu32.w)",
-CVI_VA, TypeCVI_VA>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_71337255, TypeCVI_VA>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000000;
@@ -30964,7 +31325,7 @@ def V6_vabsw_sat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32),
"$Vd32.w = vabs($Vu32.w):sat",
-CVI_VA, TypeCVI_VA>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_71337255, TypeCVI_VA>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000000;
@@ -30976,7 +31337,7 @@ def V6_vabsw_sat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32),
"$Vd32.w = vabs($Vu32.w):sat",
-CVI_VA, TypeCVI_VA>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_71337255, TypeCVI_VA>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000000;
@@ -31012,7 +31373,7 @@ def V6_vaddb : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.b = vadd($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111101;
@@ -31024,7 +31385,7 @@ def V6_vaddb_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.b = vadd($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111101;
@@ -31060,7 +31421,7 @@ def V6_vaddb_dv : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, VecDblRegs:$Vvv32),
"$Vdd32.b = vadd($Vuu32.b,$Vvv32.b)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100011;
@@ -31072,7 +31433,7 @@ def V6_vaddb_dv_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, VecDblRegs128B:$Vvv32),
"$Vdd32.b = vadd($Vuu32.b,$Vvv32.b)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100011;
@@ -31108,7 +31469,7 @@ def V6_vaddbnq : HInst<
(outs VectorRegs:$Vx32),
(ins VecPredRegs:$Qv4, VectorRegs:$Vx32in, VectorRegs:$Vu32),
"if (!$Qv4) $Vx32.b += $Vu32.b",
-CVI_VA, TypeCVI_VA>, Enc_12535811, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_a90628, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000001;
@@ -31123,7 +31484,7 @@ def V6_vaddbnq_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VecPredRegs128B:$Qv4, VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32),
"if (!$Qv4) $Vx32.b += $Vu32.b",
-CVI_VA, TypeCVI_VA>, Enc_12535811, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_a90628, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000001;
@@ -31166,7 +31527,7 @@ def V6_vaddbq : HInst<
(outs VectorRegs:$Vx32),
(ins VecPredRegs:$Qv4, VectorRegs:$Vx32in, VectorRegs:$Vu32),
"if ($Qv4) $Vx32.b += $Vu32.b",
-CVI_VA, TypeCVI_VA>, Enc_12535811, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_a90628, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000001;
@@ -31181,7 +31542,7 @@ def V6_vaddbq_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VecPredRegs128B:$Qv4, VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32),
"if ($Qv4) $Vx32.b += $Vu32.b",
-CVI_VA, TypeCVI_VA>, Enc_12535811, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_a90628, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000001;
@@ -31224,7 +31585,7 @@ def V6_vaddbsat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.b = vadd($Vu32.b,$Vv32.b):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV62T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111000;
@@ -31236,7 +31597,7 @@ def V6_vaddbsat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.b = vadd($Vu32.b,$Vv32.b):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV62T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111000;
@@ -31272,7 +31633,7 @@ def V6_vaddbsat_dv : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, VecDblRegs:$Vvv32),
"$Vdd32.b = vadd($Vuu32.b,$Vvv32.b):sat",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV62T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011110101;
@@ -31284,7 +31645,7 @@ def V6_vaddbsat_dv_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, VecDblRegs128B:$Vvv32),
"$Vdd32.b = vadd($Vuu32.b,$Vvv32.b):sat",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV62T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011110101;
@@ -31320,7 +31681,7 @@ def V6_vaddcarry : HInst<
(outs VectorRegs:$Vd32, VecPredRegs:$Qx4),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, VecPredRegs:$Qx4in),
"$Vd32.w = vadd($Vu32.w,$Vv32.w,$Qx4):carry",
-CVI_VA, TypeCVI_VA>, Enc_13691337, Requires<[HasV62T,UseHVX]> {
+tc_5a9fc4ec, TypeCVI_VA>, Enc_b43b67, Requires<[HasV62T,UseHVX]> {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100101;
@@ -31335,7 +31696,7 @@ def V6_vaddcarry_128B : HInst<
(outs VectorRegs128B:$Vd32, VecPredRegs128B:$Qx4),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, VecPredRegs128B:$Qx4in),
"$Vd32.w = vadd($Vu32.w,$Vv32.w,$Qx4):carry",
-CVI_VA, TypeCVI_VA>, Enc_13691337, Requires<[HasV62T,UseHVX]> {
+tc_5a9fc4ec, TypeCVI_VA>, Enc_b43b67, Requires<[HasV62T,UseHVX]> {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100101;
@@ -31351,7 +31712,7 @@ def V6_vaddclbh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.h = vadd(vclb($Vu32.h),$Vv32.h)",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV62T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011111000;
@@ -31363,7 +31724,7 @@ def V6_vaddclbh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.h = vadd(vclb($Vu32.h),$Vv32.h)",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV62T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011111000;
@@ -31376,7 +31737,7 @@ def V6_vaddclbw : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.w = vadd(vclb($Vu32.w),$Vv32.w)",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV62T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011111000;
@@ -31388,7 +31749,7 @@ def V6_vaddclbw_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.w = vadd(vclb($Vu32.w),$Vv32.w)",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV62T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011111000;
@@ -31401,7 +31762,7 @@ def V6_vaddh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.h = vadd($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111101;
@@ -31413,7 +31774,7 @@ def V6_vaddh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.h = vadd($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111101;
@@ -31449,7 +31810,7 @@ def V6_vaddh_dv : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, VecDblRegs:$Vvv32),
"$Vdd32.h = vadd($Vuu32.h,$Vvv32.h)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100011;
@@ -31461,7 +31822,7 @@ def V6_vaddh_dv_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, VecDblRegs128B:$Vvv32),
"$Vdd32.h = vadd($Vuu32.h,$Vvv32.h)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100011;
@@ -31497,7 +31858,7 @@ def V6_vaddhnq : HInst<
(outs VectorRegs:$Vx32),
(ins VecPredRegs:$Qv4, VectorRegs:$Vx32in, VectorRegs:$Vu32),
"if (!$Qv4) $Vx32.h += $Vu32.h",
-CVI_VA, TypeCVI_VA>, Enc_12535811, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_a90628, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000001;
@@ -31512,7 +31873,7 @@ def V6_vaddhnq_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VecPredRegs128B:$Qv4, VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32),
"if (!$Qv4) $Vx32.h += $Vu32.h",
-CVI_VA, TypeCVI_VA>, Enc_12535811, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_a90628, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000001;
@@ -31555,7 +31916,7 @@ def V6_vaddhq : HInst<
(outs VectorRegs:$Vx32),
(ins VecPredRegs:$Qv4, VectorRegs:$Vx32in, VectorRegs:$Vu32),
"if ($Qv4) $Vx32.h += $Vu32.h",
-CVI_VA, TypeCVI_VA>, Enc_12535811, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_a90628, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000001;
@@ -31570,7 +31931,7 @@ def V6_vaddhq_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VecPredRegs128B:$Qv4, VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32),
"if ($Qv4) $Vx32.h += $Vu32.h",
-CVI_VA, TypeCVI_VA>, Enc_12535811, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_a90628, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000001;
@@ -31613,7 +31974,7 @@ def V6_vaddhsat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.h = vadd($Vu32.h,$Vv32.h):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100010;
@@ -31625,7 +31986,7 @@ def V6_vaddhsat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.h = vadd($Vu32.h,$Vv32.h):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100010;
@@ -31661,7 +32022,7 @@ def V6_vaddhsat_dv : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, VecDblRegs:$Vvv32),
"$Vdd32.h = vadd($Vuu32.h,$Vvv32.h):sat",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100100;
@@ -31673,7 +32034,7 @@ def V6_vaddhsat_dv_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, VecDblRegs128B:$Vvv32),
"$Vdd32.h = vadd($Vuu32.h,$Vvv32.h):sat",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100100;
@@ -31709,7 +32070,7 @@ def V6_vaddhw : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vdd32.w = vadd($Vu32.h,$Vv32.h)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100101;
@@ -31721,7 +32082,7 @@ def V6_vaddhw_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vdd32.w = vadd($Vu32.h,$Vv32.h)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100101;
@@ -31734,7 +32095,7 @@ def V6_vaddhw_acc : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vxx32.w += vadd($Vu32.h,$Vv32.h)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_5972412, Requires<[HasV62T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_3fc427, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100001;
@@ -31748,7 +32109,7 @@ def V6_vaddhw_acc_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vxx32.w += vadd($Vu32.h,$Vv32.h)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_5972412, Requires<[HasV62T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_3fc427, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100001;
@@ -31813,7 +32174,7 @@ def V6_vaddubh : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vdd32.h = vadd($Vu32.ub,$Vv32.ub)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100101;
@@ -31825,7 +32186,7 @@ def V6_vaddubh_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vdd32.h = vadd($Vu32.ub,$Vv32.ub)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100101;
@@ -31838,7 +32199,7 @@ def V6_vaddubh_acc : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vxx32.h += vadd($Vu32.ub,$Vv32.ub)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_5972412, Requires<[HasV62T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_3fc427, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100010;
@@ -31852,7 +32213,7 @@ def V6_vaddubh_acc_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vxx32.h += vadd($Vu32.ub,$Vv32.ub)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_5972412, Requires<[HasV62T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_3fc427, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100010;
@@ -31917,7 +32278,7 @@ def V6_vaddubsat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.ub = vadd($Vu32.ub,$Vv32.ub):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100010;
@@ -31929,7 +32290,7 @@ def V6_vaddubsat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.ub = vadd($Vu32.ub,$Vv32.ub):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100010;
@@ -31965,7 +32326,7 @@ def V6_vaddubsat_dv : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, VecDblRegs:$Vvv32),
"$Vdd32.ub = vadd($Vuu32.ub,$Vvv32.ub):sat",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100011;
@@ -31977,7 +32338,7 @@ def V6_vaddubsat_dv_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, VecDblRegs128B:$Vvv32),
"$Vdd32.ub = vadd($Vuu32.ub,$Vvv32.ub):sat",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100011;
@@ -32013,7 +32374,7 @@ def V6_vaddububb_sat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.ub = vadd($Vu32.ub,$Vv32.b):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV62T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011110101;
@@ -32025,7 +32386,7 @@ def V6_vaddububb_sat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.ub = vadd($Vu32.ub,$Vv32.b):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV62T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011110101;
@@ -32038,7 +32399,7 @@ def V6_vadduhsat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.uh = vadd($Vu32.uh,$Vv32.uh):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100010;
@@ -32050,7 +32411,7 @@ def V6_vadduhsat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.uh = vadd($Vu32.uh,$Vv32.uh):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100010;
@@ -32086,7 +32447,7 @@ def V6_vadduhsat_dv : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, VecDblRegs:$Vvv32),
"$Vdd32.uh = vadd($Vuu32.uh,$Vvv32.uh):sat",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100100;
@@ -32098,7 +32459,7 @@ def V6_vadduhsat_dv_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, VecDblRegs128B:$Vvv32),
"$Vdd32.uh = vadd($Vuu32.uh,$Vvv32.uh):sat",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100100;
@@ -32134,7 +32495,7 @@ def V6_vadduhw : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vdd32.w = vadd($Vu32.uh,$Vv32.uh)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100101;
@@ -32146,7 +32507,7 @@ def V6_vadduhw_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vdd32.w = vadd($Vu32.uh,$Vv32.uh)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100101;
@@ -32159,7 +32520,7 @@ def V6_vadduhw_acc : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vxx32.w += vadd($Vu32.uh,$Vv32.uh)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_5972412, Requires<[HasV62T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_3fc427, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100010;
@@ -32173,7 +32534,7 @@ def V6_vadduhw_acc_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vxx32.w += vadd($Vu32.uh,$Vv32.uh)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_5972412, Requires<[HasV62T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_3fc427, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100010;
@@ -32238,7 +32599,7 @@ def V6_vadduwsat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.uw = vadd($Vu32.uw,$Vv32.uw):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV62T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111011;
@@ -32250,7 +32611,7 @@ def V6_vadduwsat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.uw = vadd($Vu32.uw,$Vv32.uw):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV62T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111011;
@@ -32286,7 +32647,7 @@ def V6_vadduwsat_dv : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, VecDblRegs:$Vvv32),
"$Vdd32.uw = vadd($Vuu32.uw,$Vvv32.uw):sat",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV62T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011110101;
@@ -32298,7 +32659,7 @@ def V6_vadduwsat_dv_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, VecDblRegs128B:$Vvv32),
"$Vdd32.uw = vadd($Vuu32.uw,$Vvv32.uw):sat",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV62T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011110101;
@@ -32334,7 +32695,7 @@ def V6_vaddw : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.w = vadd($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100010;
@@ -32346,7 +32707,7 @@ def V6_vaddw_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.w = vadd($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100010;
@@ -32382,7 +32743,7 @@ def V6_vaddw_dv : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, VecDblRegs:$Vvv32),
"$Vdd32.w = vadd($Vuu32.w,$Vvv32.w)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100011;
@@ -32394,7 +32755,7 @@ def V6_vaddw_dv_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, VecDblRegs128B:$Vvv32),
"$Vdd32.w = vadd($Vuu32.w,$Vvv32.w)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100011;
@@ -32430,7 +32791,7 @@ def V6_vaddwnq : HInst<
(outs VectorRegs:$Vx32),
(ins VecPredRegs:$Qv4, VectorRegs:$Vx32in, VectorRegs:$Vu32),
"if (!$Qv4) $Vx32.w += $Vu32.w",
-CVI_VA, TypeCVI_VA>, Enc_12535811, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_a90628, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000001;
@@ -32445,7 +32806,7 @@ def V6_vaddwnq_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VecPredRegs128B:$Qv4, VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32),
"if (!$Qv4) $Vx32.w += $Vu32.w",
-CVI_VA, TypeCVI_VA>, Enc_12535811, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_a90628, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000001;
@@ -32488,7 +32849,7 @@ def V6_vaddwq : HInst<
(outs VectorRegs:$Vx32),
(ins VecPredRegs:$Qv4, VectorRegs:$Vx32in, VectorRegs:$Vu32),
"if ($Qv4) $Vx32.w += $Vu32.w",
-CVI_VA, TypeCVI_VA>, Enc_12535811, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_a90628, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000001;
@@ -32503,7 +32864,7 @@ def V6_vaddwq_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VecPredRegs128B:$Qv4, VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32),
"if ($Qv4) $Vx32.w += $Vu32.w",
-CVI_VA, TypeCVI_VA>, Enc_12535811, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_a90628, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000001;
@@ -32546,7 +32907,7 @@ def V6_vaddwsat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.w = vadd($Vu32.w,$Vv32.w):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100010;
@@ -32558,7 +32919,7 @@ def V6_vaddwsat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.w = vadd($Vu32.w,$Vv32.w):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100010;
@@ -32594,7 +32955,7 @@ def V6_vaddwsat_dv : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, VecDblRegs:$Vvv32),
"$Vdd32.w = vadd($Vuu32.w,$Vvv32.w):sat",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100100;
@@ -32606,7 +32967,7 @@ def V6_vaddwsat_dv_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, VecDblRegs128B:$Vvv32),
"$Vdd32.w = vadd($Vuu32.w,$Vvv32.w):sat",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100100;
@@ -32642,7 +33003,7 @@ def V6_valignb : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8),
"$Vd32 = valign($Vu32,$Vv32,$Rt8)",
-CVI_VP_LONG, TypeCVI_VP>, Enc_11083408, Requires<[HasV60T,UseHVX]> {
+tc_c4b515c5, TypeCVI_VP>, Enc_a30110, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-24} = 0b00011011;
@@ -32654,7 +33015,7 @@ def V6_valignb_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, IntRegsLow8:$Rt8),
"$Vd32 = valign($Vu32,$Vv32,$Rt8)",
-CVI_VP_LONG, TypeCVI_VP>, Enc_11083408, Requires<[HasV60T,UseHVX]> {
+tc_c4b515c5, TypeCVI_VP>, Enc_a30110, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-24} = 0b00011011;
@@ -32667,7 +33028,7 @@ def V6_valignbi : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, u3_0Imm:$Ii),
"$Vd32 = valign($Vu32,$Vv32,#$Ii)",
-CVI_VP_LONG, TypeCVI_VP>, Enc_7171569, Requires<[HasV60T,UseHVX]> {
+tc_c4b515c5, TypeCVI_VP>, Enc_0b2e5b, Requires<[HasV60T,UseHVX]> {
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011110001;
let hasNewValue = 1;
@@ -32678,7 +33039,7 @@ def V6_valignbi_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, u3_0Imm:$Ii),
"$Vd32 = valign($Vu32,$Vv32,#$Ii)",
-CVI_VP_LONG, TypeCVI_VP>, Enc_7171569, Requires<[HasV60T,UseHVX]> {
+tc_c4b515c5, TypeCVI_VP>, Enc_0b2e5b, Requires<[HasV60T,UseHVX]> {
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011110001;
let hasNewValue = 1;
@@ -32690,7 +33051,7 @@ def V6_vand : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32 = vand($Vu32,$Vv32)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100001;
@@ -32702,7 +33063,7 @@ def V6_vand_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32 = vand($Vu32,$Vv32)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100001;
@@ -32715,7 +33076,7 @@ def V6_vandnqrt : HInst<
(outs VectorRegs:$Vd32),
(ins VecPredRegs:$Qu4, IntRegs:$Rt32),
"$Vd32 = vand(!$Qu4,$Rt32)",
-CVI_VX, TypeCVI_VX>, Enc_4711514, Requires<[HasV62T,UseHVX]> {
+tc_e231aa4f, TypeCVI_VX>, Enc_7b7ba8, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-10} = 0b0001;
let Inst{31-21} = 0b00011001101;
@@ -32727,7 +33088,7 @@ def V6_vandnqrt_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VecPredRegs128B:$Qu4, IntRegs:$Rt32),
"$Vd32 = vand(!$Qu4,$Rt32)",
-CVI_VX, TypeCVI_VX>, Enc_4711514, Requires<[HasV62T,UseHVX]> {
+tc_e231aa4f, TypeCVI_VX>, Enc_7b7ba8, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-10} = 0b0001;
let Inst{31-21} = 0b00011001101;
@@ -32740,7 +33101,7 @@ def V6_vandnqrt_acc : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, VecPredRegs:$Qu4, IntRegs:$Rt32),
"$Vx32 |= vand(!$Qu4,$Rt32)",
-CVI_VX, TypeCVI_VX>, Enc_4944558, Requires<[HasV62T,UseHVX]> {
+tc_9311da3f, TypeCVI_VX>, Enc_895bd9, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-10} = 0b1001;
let Inst{31-21} = 0b00011001011;
@@ -32754,7 +33115,7 @@ def V6_vandnqrt_acc_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, VecPredRegs128B:$Qu4, IntRegs:$Rt32),
"$Vx32 |= vand(!$Qu4,$Rt32)",
-CVI_VX, TypeCVI_VX>, Enc_4944558, Requires<[HasV62T,UseHVX]> {
+tc_9311da3f, TypeCVI_VX>, Enc_895bd9, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-10} = 0b1001;
let Inst{31-21} = 0b00011001011;
@@ -32819,7 +33180,7 @@ def V6_vandqrt : HInst<
(outs VectorRegs:$Vd32),
(ins VecPredRegs:$Qu4, IntRegs:$Rt32),
"$Vd32 = vand($Qu4,$Rt32)",
-CVI_VX_LATE, TypeCVI_VX>, Enc_4711514, Requires<[HasV60T,UseHVX]> {
+tc_e231aa4f, TypeCVI_VX_LATE>, Enc_7b7ba8, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-10} = 0b0000;
let Inst{31-21} = 0b00011001101;
@@ -32831,7 +33192,7 @@ def V6_vandqrt_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VecPredRegs128B:$Qu4, IntRegs:$Rt32),
"$Vd32 = vand($Qu4,$Rt32)",
-CVI_VX_LATE, TypeCVI_VX>, Enc_4711514, Requires<[HasV60T,UseHVX]> {
+tc_e231aa4f, TypeCVI_VX_LATE>, Enc_7b7ba8, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-10} = 0b0000;
let Inst{31-21} = 0b00011001101;
@@ -32844,7 +33205,7 @@ def V6_vandqrt_acc : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, VecPredRegs:$Qu4, IntRegs:$Rt32),
"$Vx32 |= vand($Qu4,$Rt32)",
-CVI_VX_LATE, TypeCVI_VX>, Enc_4944558, Requires<[HasV60T,UseHVX]> {
+tc_9311da3f, TypeCVI_VX_LATE>, Enc_895bd9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-10} = 0b1000;
let Inst{31-21} = 0b00011001011;
@@ -32858,7 +33219,7 @@ def V6_vandqrt_acc_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, VecPredRegs128B:$Qu4, IntRegs:$Rt32),
"$Vx32 |= vand($Qu4,$Rt32)",
-CVI_VX_LATE, TypeCVI_VX>, Enc_4944558, Requires<[HasV60T,UseHVX]> {
+tc_9311da3f, TypeCVI_VX_LATE>, Enc_895bd9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-10} = 0b1000;
let Inst{31-21} = 0b00011001011;
@@ -32923,7 +33284,7 @@ def V6_vandvnqv : HInst<
(outs VectorRegs:$Vd32),
(ins VecPredRegs:$Qv4, VectorRegs:$Vu32),
"$Vd32 = vand(!$Qv4,$Vu32)",
-CVI_VA, TypeCVI_VA>, Enc_1220199, Requires<[HasV62T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_c4dc92, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000011;
@@ -32936,7 +33297,7 @@ def V6_vandvnqv_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VecPredRegs128B:$Qv4, VectorRegs128B:$Vu32),
"$Vd32 = vand(!$Qv4,$Vu32)",
-CVI_VA, TypeCVI_VA>, Enc_1220199, Requires<[HasV62T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_c4dc92, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000011;
@@ -32950,7 +33311,7 @@ def V6_vandvqv : HInst<
(outs VectorRegs:$Vd32),
(ins VecPredRegs:$Qv4, VectorRegs:$Vu32),
"$Vd32 = vand($Qv4,$Vu32)",
-CVI_VA, TypeCVI_VA>, Enc_1220199, Requires<[HasV62T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_c4dc92, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000011;
@@ -32963,7 +33324,7 @@ def V6_vandvqv_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VecPredRegs128B:$Qv4, VectorRegs128B:$Vu32),
"$Vd32 = vand($Qv4,$Vu32)",
-CVI_VA, TypeCVI_VA>, Enc_1220199, Requires<[HasV62T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_c4dc92, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000011;
@@ -32977,7 +33338,7 @@ def V6_vandvrt : HInst<
(outs VecPredRegs:$Qd4),
(ins VectorRegs:$Vu32, IntRegs:$Rt32),
"$Qd4 = vand($Vu32,$Rt32)",
-CVI_VX_LATE, TypeCVI_VX>, Enc_11498120, Requires<[HasV60T,UseHVX]> {
+tc_e231aa4f, TypeCVI_VX_LATE>, Enc_0f8bab, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b010010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001101;
@@ -32989,7 +33350,7 @@ def V6_vandvrt_128B : HInst<
(outs VecPredRegs128B:$Qd4),
(ins VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Qd4 = vand($Vu32,$Rt32)",
-CVI_VX_LATE, TypeCVI_VX>, Enc_11498120, Requires<[HasV60T,UseHVX]> {
+tc_e231aa4f, TypeCVI_VX_LATE>, Enc_0f8bab, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b010010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001101;
@@ -33002,7 +33363,7 @@ def V6_vandvrt_acc : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, IntRegs:$Rt32),
"$Qx4 |= vand($Vu32,$Rt32)",
-CVI_VX_LATE, TypeCVI_VX>, Enc_10612292, Requires<[HasV60T,UseHVX]> {
+tc_9311da3f, TypeCVI_VX_LATE>, Enc_adf111, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b100000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001011;
@@ -33016,7 +33377,7 @@ def V6_vandvrt_acc_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Qx4 |= vand($Vu32,$Rt32)",
-CVI_VX_LATE, TypeCVI_VX>, Enc_10612292, Requires<[HasV60T,UseHVX]> {
+tc_9311da3f, TypeCVI_VX_LATE>, Enc_adf111, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b100000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001011;
@@ -33081,7 +33442,7 @@ def V6_vaslh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vd32.h = vasl($Vu32.h,$Rt32)",
-CVI_VS, TypeCVI_VS>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_41f4b64e, TypeCVI_VS>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001100;
@@ -33093,7 +33454,7 @@ def V6_vaslh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vd32.h = vasl($Vu32.h,$Rt32)",
-CVI_VS, TypeCVI_VS>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_41f4b64e, TypeCVI_VS>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001100;
@@ -33129,7 +33490,7 @@ def V6_vaslhv : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.h = vasl($Vu32.h,$Vv32.h)",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111101;
@@ -33141,7 +33502,7 @@ def V6_vaslhv_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.h = vasl($Vu32.h,$Vv32.h)",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111101;
@@ -33177,7 +33538,7 @@ def V6_vaslw : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vd32.w = vasl($Vu32.w,$Rt32)",
-CVI_VS, TypeCVI_VS>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_41f4b64e, TypeCVI_VS>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001011;
@@ -33189,7 +33550,7 @@ def V6_vaslw_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vd32.w = vasl($Vu32.w,$Rt32)",
-CVI_VS, TypeCVI_VS>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_41f4b64e, TypeCVI_VS>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001011;
@@ -33202,7 +33563,7 @@ def V6_vaslw_acc : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vx32.w += vasl($Vu32.w,$Rt32)",
-CVI_VS, TypeCVI_VS>, Enc_10058269, Requires<[HasV60T,UseHVX]> {
+tc_c00bf9c9, TypeCVI_VS>, Enc_5138b3, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001011;
@@ -33216,7 +33577,7 @@ def V6_vaslw_acc_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vx32.w += vasl($Vu32.w,$Rt32)",
-CVI_VS, TypeCVI_VS>, Enc_10058269, Requires<[HasV60T,UseHVX]> {
+tc_c00bf9c9, TypeCVI_VS>, Enc_5138b3, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001011;
@@ -33281,7 +33642,7 @@ def V6_vaslwv : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.w = vasl($Vu32.w,$Vv32.w)",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111101;
@@ -33293,7 +33654,7 @@ def V6_vaslwv_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.w = vasl($Vu32.w,$Vv32.w)",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111101;
@@ -33329,7 +33690,7 @@ def V6_vasrh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vd32.h = vasr($Vu32.h,$Rt32)",
-CVI_VS, TypeCVI_VS>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_41f4b64e, TypeCVI_VS>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001011;
@@ -33341,7 +33702,7 @@ def V6_vasrh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vd32.h = vasr($Vu32.h,$Rt32)",
-CVI_VS, TypeCVI_VS>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_41f4b64e, TypeCVI_VS>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001011;
@@ -33377,7 +33738,7 @@ def V6_vasrhbrndsat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8),
"$Vd32.b = vasr($Vu32.h,$Vv32.h,$Rt8):rnd:sat",
-CVI_VS, TypeCVI_VS>, Enc_11083408, Requires<[HasV60T,UseHVX]> {
+tc_7fa8b40f, TypeCVI_VS>, Enc_a30110, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{31-24} = 0b00011011;
@@ -33389,7 +33750,7 @@ def V6_vasrhbrndsat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, IntRegsLow8:$Rt8),
"$Vd32.b = vasr($Vu32.h,$Vv32.h,$Rt8):rnd:sat",
-CVI_VS, TypeCVI_VS>, Enc_11083408, Requires<[HasV60T,UseHVX]> {
+tc_7fa8b40f, TypeCVI_VS>, Enc_a30110, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{31-24} = 0b00011011;
@@ -33402,7 +33763,7 @@ def V6_vasrhbrndsat_alt : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8),
"$Vd32 = vasrhb($Vu32,$Vv32,$Rt8):rnd:sat",
-PSEUDO, TypeMAPPING>, Requires<[HasV60T]> {
+tc_7fa8b40f, TypeMAPPING>, Requires<[HasV60T]> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -33412,7 +33773,7 @@ def V6_vasrhbsat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8),
"$Vd32.b = vasr($Vu32.h,$Vv32.h,$Rt8):sat",
-CVI_VS, TypeCVI_VS>, Enc_11083408, Requires<[HasV62T,UseHVX]> {
+tc_7fa8b40f, TypeCVI_VS>, Enc_a30110, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-24} = 0b00011000;
@@ -33424,7 +33785,7 @@ def V6_vasrhbsat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, IntRegsLow8:$Rt8),
"$Vd32.b = vasr($Vu32.h,$Vv32.h,$Rt8):sat",
-CVI_VS, TypeCVI_VS>, Enc_11083408, Requires<[HasV62T,UseHVX]> {
+tc_7fa8b40f, TypeCVI_VS>, Enc_a30110, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-24} = 0b00011000;
@@ -33437,7 +33798,7 @@ def V6_vasrhubrndsat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8),
"$Vd32.ub = vasr($Vu32.h,$Vv32.h,$Rt8):rnd:sat",
-CVI_VS, TypeCVI_VS>, Enc_11083408, Requires<[HasV60T,UseHVX]> {
+tc_7fa8b40f, TypeCVI_VS>, Enc_a30110, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-24} = 0b00011011;
@@ -33449,7 +33810,7 @@ def V6_vasrhubrndsat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, IntRegsLow8:$Rt8),
"$Vd32.ub = vasr($Vu32.h,$Vv32.h,$Rt8):rnd:sat",
-CVI_VS, TypeCVI_VS>, Enc_11083408, Requires<[HasV60T,UseHVX]> {
+tc_7fa8b40f, TypeCVI_VS>, Enc_a30110, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-24} = 0b00011011;
@@ -33462,7 +33823,7 @@ def V6_vasrhubrndsat_alt : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8),
"$Vd32 = vasrhub($Vu32,$Vv32,$Rt8):rnd:sat",
-PSEUDO, TypeMAPPING>, Requires<[HasV60T]> {
+tc_7fa8b40f, TypeMAPPING>, Requires<[HasV60T]> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -33472,7 +33833,7 @@ def V6_vasrhubsat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8),
"$Vd32.ub = vasr($Vu32.h,$Vv32.h,$Rt8):sat",
-CVI_VS, TypeCVI_VS>, Enc_11083408, Requires<[HasV60T,UseHVX]> {
+tc_7fa8b40f, TypeCVI_VS>, Enc_a30110, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-24} = 0b00011011;
@@ -33484,7 +33845,7 @@ def V6_vasrhubsat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, IntRegsLow8:$Rt8),
"$Vd32.ub = vasr($Vu32.h,$Vv32.h,$Rt8):sat",
-CVI_VS, TypeCVI_VS>, Enc_11083408, Requires<[HasV60T,UseHVX]> {
+tc_7fa8b40f, TypeCVI_VS>, Enc_a30110, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-24} = 0b00011011;
@@ -33497,7 +33858,7 @@ def V6_vasrhubsat_alt : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8),
"$Vd32 = vasrhub($Vu32,$Vv32,$Rt8):sat",
-PSEUDO, TypeMAPPING>, Requires<[HasV60T]> {
+tc_7fa8b40f, TypeMAPPING>, Requires<[HasV60T]> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -33507,7 +33868,7 @@ def V6_vasrhv : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.h = vasr($Vu32.h,$Vv32.h)",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111101;
@@ -33519,7 +33880,7 @@ def V6_vasrhv_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.h = vasr($Vu32.h,$Vv32.h)",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111101;
@@ -33555,7 +33916,7 @@ def V6_vasruwuhrndsat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8),
"$Vd32.uh = vasr($Vu32.uw,$Vv32.uw,$Rt8):rnd:sat",
-CVI_VS, TypeCVI_VS>, Enc_11083408, Requires<[HasV62T,UseHVX]> {
+tc_7fa8b40f, TypeCVI_VS>, Enc_a30110, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-24} = 0b00011000;
@@ -33567,7 +33928,7 @@ def V6_vasruwuhrndsat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, IntRegsLow8:$Rt8),
"$Vd32.uh = vasr($Vu32.uw,$Vv32.uw,$Rt8):rnd:sat",
-CVI_VS, TypeCVI_VS>, Enc_11083408, Requires<[HasV62T,UseHVX]> {
+tc_7fa8b40f, TypeCVI_VS>, Enc_a30110, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-24} = 0b00011000;
@@ -33580,7 +33941,7 @@ def V6_vasrw : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vd32.w = vasr($Vu32.w,$Rt32)",
-CVI_VS, TypeCVI_VS>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_41f4b64e, TypeCVI_VS>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001011;
@@ -33592,7 +33953,7 @@ def V6_vasrw_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vd32.w = vasr($Vu32.w,$Rt32)",
-CVI_VS, TypeCVI_VS>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_41f4b64e, TypeCVI_VS>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001011;
@@ -33605,7 +33966,7 @@ def V6_vasrw_acc : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vx32.w += vasr($Vu32.w,$Rt32)",
-CVI_VS, TypeCVI_VS>, Enc_10058269, Requires<[HasV60T,UseHVX]> {
+tc_c00bf9c9, TypeCVI_VS>, Enc_5138b3, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001011;
@@ -33619,7 +33980,7 @@ def V6_vasrw_acc_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vx32.w += vasr($Vu32.w,$Rt32)",
-CVI_VS, TypeCVI_VS>, Enc_10058269, Requires<[HasV60T,UseHVX]> {
+tc_c00bf9c9, TypeCVI_VS>, Enc_5138b3, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001011;
@@ -33684,7 +34045,7 @@ def V6_vasrwh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8),
"$Vd32.h = vasr($Vu32.w,$Vv32.w,$Rt8)",
-CVI_VS, TypeCVI_VS>, Enc_11083408, Requires<[HasV60T,UseHVX]> {
+tc_7fa8b40f, TypeCVI_VS>, Enc_a30110, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-24} = 0b00011011;
@@ -33696,7 +34057,7 @@ def V6_vasrwh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, IntRegsLow8:$Rt8),
"$Vd32.h = vasr($Vu32.w,$Vv32.w,$Rt8)",
-CVI_VS, TypeCVI_VS>, Enc_11083408, Requires<[HasV60T,UseHVX]> {
+tc_7fa8b40f, TypeCVI_VS>, Enc_a30110, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-24} = 0b00011011;
@@ -33709,7 +34070,7 @@ def V6_vasrwh_alt : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8),
"$Vd32 = vasrwh($Vu32,$Vv32,$Rt8)",
-PSEUDO, TypeMAPPING>, Requires<[HasV60T]> {
+tc_7fa8b40f, TypeMAPPING>, Requires<[HasV60T]> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -33719,7 +34080,7 @@ def V6_vasrwhrndsat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8),
"$Vd32.h = vasr($Vu32.w,$Vv32.w,$Rt8):rnd:sat",
-CVI_VS, TypeCVI_VS>, Enc_11083408, Requires<[HasV60T,UseHVX]> {
+tc_7fa8b40f, TypeCVI_VS>, Enc_a30110, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-24} = 0b00011011;
@@ -33731,7 +34092,7 @@ def V6_vasrwhrndsat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, IntRegsLow8:$Rt8),
"$Vd32.h = vasr($Vu32.w,$Vv32.w,$Rt8):rnd:sat",
-CVI_VS, TypeCVI_VS>, Enc_11083408, Requires<[HasV60T,UseHVX]> {
+tc_7fa8b40f, TypeCVI_VS>, Enc_a30110, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-24} = 0b00011011;
@@ -33744,7 +34105,7 @@ def V6_vasrwhrndsat_alt : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8),
"$Vd32 = vasrwh($Vu32,$Vv32,$Rt8):rnd:sat",
-PSEUDO, TypeMAPPING>, Requires<[HasV60T]> {
+tc_7fa8b40f, TypeMAPPING>, Requires<[HasV60T]> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -33754,7 +34115,7 @@ def V6_vasrwhsat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8),
"$Vd32.h = vasr($Vu32.w,$Vv32.w,$Rt8):sat",
-CVI_VS, TypeCVI_VS>, Enc_11083408, Requires<[HasV60T,UseHVX]> {
+tc_7fa8b40f, TypeCVI_VS>, Enc_a30110, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-24} = 0b00011011;
@@ -33766,7 +34127,7 @@ def V6_vasrwhsat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, IntRegsLow8:$Rt8),
"$Vd32.h = vasr($Vu32.w,$Vv32.w,$Rt8):sat",
-CVI_VS, TypeCVI_VS>, Enc_11083408, Requires<[HasV60T,UseHVX]> {
+tc_7fa8b40f, TypeCVI_VS>, Enc_a30110, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-24} = 0b00011011;
@@ -33779,7 +34140,7 @@ def V6_vasrwhsat_alt : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8),
"$Vd32 = vasrwh($Vu32,$Vv32,$Rt8):sat",
-PSEUDO, TypeMAPPING>, Requires<[HasV60T]> {
+tc_7fa8b40f, TypeMAPPING>, Requires<[HasV60T]> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -33789,7 +34150,7 @@ def V6_vasrwuhrndsat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8),
"$Vd32.uh = vasr($Vu32.w,$Vv32.w,$Rt8):rnd:sat",
-CVI_VS, TypeCVI_VS>, Enc_11083408, Requires<[HasV62T,UseHVX]> {
+tc_7fa8b40f, TypeCVI_VS>, Enc_a30110, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-24} = 0b00011000;
@@ -33801,7 +34162,7 @@ def V6_vasrwuhrndsat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, IntRegsLow8:$Rt8),
"$Vd32.uh = vasr($Vu32.w,$Vv32.w,$Rt8):rnd:sat",
-CVI_VS, TypeCVI_VS>, Enc_11083408, Requires<[HasV62T,UseHVX]> {
+tc_7fa8b40f, TypeCVI_VS>, Enc_a30110, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-24} = 0b00011000;
@@ -33814,7 +34175,7 @@ def V6_vasrwuhsat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8),
"$Vd32.uh = vasr($Vu32.w,$Vv32.w,$Rt8):sat",
-CVI_VS, TypeCVI_VS>, Enc_11083408, Requires<[HasV60T,UseHVX]> {
+tc_7fa8b40f, TypeCVI_VS>, Enc_a30110, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-24} = 0b00011011;
@@ -33826,7 +34187,7 @@ def V6_vasrwuhsat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, IntRegsLow8:$Rt8),
"$Vd32.uh = vasr($Vu32.w,$Vv32.w,$Rt8):sat",
-CVI_VS, TypeCVI_VS>, Enc_11083408, Requires<[HasV60T,UseHVX]> {
+tc_7fa8b40f, TypeCVI_VS>, Enc_a30110, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-24} = 0b00011011;
@@ -33839,7 +34200,7 @@ def V6_vasrwuhsat_alt : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8),
"$Vd32 = vasrwuh($Vu32,$Vv32,$Rt8):sat",
-PSEUDO, TypeMAPPING>, Requires<[HasV60T]> {
+tc_7fa8b40f, TypeMAPPING>, Requires<[HasV60T]> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -33849,7 +34210,7 @@ def V6_vasrwv : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.w = vasr($Vu32.w,$Vv32.w)",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111101;
@@ -33861,7 +34222,7 @@ def V6_vasrwv_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.w = vasr($Vu32.w,$Vv32.w)",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111101;
@@ -33897,7 +34258,7 @@ def V6_vassign : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32),
"$Vd32 = $Vu32",
-CVI_VA, TypeCVI_VA>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_71337255, TypeCVI_VA>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b1;
let Inst{31-16} = 0b0001111000000011;
@@ -33909,7 +34270,7 @@ def V6_vassign_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32),
"$Vd32 = $Vu32",
-CVI_VA, TypeCVI_VA>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_71337255, TypeCVI_VA>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b1;
let Inst{31-16} = 0b0001111000000011;
@@ -33922,7 +34283,7 @@ def V6_vassignp : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32),
"$Vdd32 = $Vuu32",
-CVI_VA, TypeCVI_VA>, Requires<[HasV60T,UseHVX]> {
+CVI_VA, TypeCVI_VA_DV>, Requires<[HasV60T,UseHVX]> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -33932,7 +34293,7 @@ def V6_vassignp_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32),
"$Vdd32 = $Vuu32",
-CVI_VA, TypeCVI_VA>, Requires<[HasV60T,UseHVX]> {
+CVI_VA, TypeCVI_VA_DV>, Requires<[HasV60T,UseHVX]> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -33943,7 +34304,7 @@ def V6_vavgh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.h = vavg($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100110;
@@ -33955,7 +34316,7 @@ def V6_vavgh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.h = vavg($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100110;
@@ -33991,7 +34352,7 @@ def V6_vavghrnd : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.h = vavg($Vu32.h,$Vv32.h):rnd",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100111;
@@ -34003,7 +34364,7 @@ def V6_vavghrnd_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.h = vavg($Vu32.h,$Vv32.h):rnd",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100111;
@@ -34039,7 +34400,7 @@ def V6_vavgub : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.ub = vavg($Vu32.ub,$Vv32.ub)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100110;
@@ -34051,7 +34412,7 @@ def V6_vavgub_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.ub = vavg($Vu32.ub,$Vv32.ub)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100110;
@@ -34087,7 +34448,7 @@ def V6_vavgubrnd : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.ub = vavg($Vu32.ub,$Vv32.ub):rnd",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100111;
@@ -34099,7 +34460,7 @@ def V6_vavgubrnd_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.ub = vavg($Vu32.ub,$Vv32.ub):rnd",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100111;
@@ -34135,7 +34496,7 @@ def V6_vavguh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.uh = vavg($Vu32.uh,$Vv32.uh)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100110;
@@ -34147,7 +34508,7 @@ def V6_vavguh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.uh = vavg($Vu32.uh,$Vv32.uh)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100110;
@@ -34183,7 +34544,7 @@ def V6_vavguhrnd : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.uh = vavg($Vu32.uh,$Vv32.uh):rnd",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100111;
@@ -34195,7 +34556,7 @@ def V6_vavguhrnd_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.uh = vavg($Vu32.uh,$Vv32.uh):rnd",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100111;
@@ -34231,7 +34592,7 @@ def V6_vavgw : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.w = vavg($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100110;
@@ -34243,7 +34604,7 @@ def V6_vavgw_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.w = vavg($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100110;
@@ -34279,7 +34640,7 @@ def V6_vavgwrnd : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.w = vavg($Vu32.w,$Vv32.w):rnd",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100111;
@@ -34291,7 +34652,7 @@ def V6_vavgwrnd_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.w = vavg($Vu32.w,$Vv32.w):rnd",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100111;
@@ -34327,7 +34688,7 @@ def V6_vccombine : HInst<
(outs VecDblRegs:$Vdd32),
(ins PredRegs:$Ps4, VectorRegs:$Vu32, VectorRegs:$Vv32),
"if ($Ps4) $Vdd32 = vcombine($Vu32,$Vv32)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_16145290, Requires<[HasV60T,UseHVX]> {
+tc_2171ebae, TypeCVI_VA_DV>, Enc_8c2412, Requires<[HasV60T,UseHVX]> {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011010011;
@@ -34340,7 +34701,7 @@ def V6_vccombine_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins PredRegs:$Ps4, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"if ($Ps4) $Vdd32 = vcombine($Vu32,$Vv32)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_16145290, Requires<[HasV60T,UseHVX]> {
+tc_2171ebae, TypeCVI_VA_DV>, Enc_8c2412, Requires<[HasV60T,UseHVX]> {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011010011;
@@ -34354,7 +34715,7 @@ def V6_vcl0h : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32),
"$Vd32.uh = vcl0($Vu32.uh)",
-CVI_VS, TypeCVI_VS>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_d2cb81ea, TypeCVI_VS>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000010;
@@ -34366,7 +34727,7 @@ def V6_vcl0h_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32),
"$Vd32.uh = vcl0($Vu32.uh)",
-CVI_VS, TypeCVI_VS>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_d2cb81ea, TypeCVI_VS>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000010;
@@ -34402,7 +34763,7 @@ def V6_vcl0w : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32),
"$Vd32.uw = vcl0($Vu32.uw)",
-CVI_VS, TypeCVI_VS>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_d2cb81ea, TypeCVI_VS>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000010;
@@ -34414,7 +34775,7 @@ def V6_vcl0w_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32),
"$Vd32.uw = vcl0($Vu32.uw)",
-CVI_VS, TypeCVI_VS>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_d2cb81ea, TypeCVI_VS>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000010;
@@ -34450,7 +34811,7 @@ def V6_vcmov : HInst<
(outs VectorRegs:$Vd32),
(ins PredRegs:$Ps4, VectorRegs:$Vu32),
"if ($Ps4) $Vd32 = $Vu32",
-CVI_VA, TypeCVI_VA>, Enc_12023037, Requires<[HasV60T,UseHVX]> {
+tc_b06ab583, TypeCVI_VA>, Enc_770858, Requires<[HasV60T,UseHVX]> {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001101000000000;
@@ -34463,7 +34824,7 @@ def V6_vcmov_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins PredRegs:$Ps4, VectorRegs128B:$Vu32),
"if ($Ps4) $Vd32 = $Vu32",
-CVI_VA, TypeCVI_VA>, Enc_12023037, Requires<[HasV60T,UseHVX]> {
+tc_b06ab583, TypeCVI_VA>, Enc_770858, Requires<[HasV60T,UseHVX]> {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001101000000000;
@@ -34477,7 +34838,7 @@ def V6_vcombine : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vdd32 = vcombine($Vu32,$Vv32)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111010;
@@ -34490,7 +34851,7 @@ def V6_vcombine_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vdd32 = vcombine($Vu32,$Vv32)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111010;
@@ -34527,7 +34888,7 @@ def V6_vdeal : HInst<
(outs VectorRegs:$Vy32, VectorRegs:$Vx32),
(ins VectorRegs:$Vy32in, VectorRegs:$Vx32in, IntRegs:$Rt32),
"vdeal($Vy32,$Vx32,$Rt32)",
-CVI_VP_VS_LONG_EARLY, TypeCVI_VP_VS>, Enc_11422009, Requires<[HasV60T,UseHVX]> {
+tc_5c120602, TypeCVI_VP_VS>, Enc_989021, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001111;
@@ -34542,7 +34903,7 @@ def V6_vdeal_128B : HInst<
(outs VectorRegs128B:$Vy32, VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vy32in, VectorRegs128B:$Vx32in, IntRegs:$Rt32),
"vdeal($Vy32,$Vx32,$Rt32)",
-CVI_VP_VS_LONG_EARLY, TypeCVI_VP_VS>, Enc_11422009, Requires<[HasV60T,UseHVX]> {
+tc_5c120602, TypeCVI_VP_VS>, Enc_989021, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001111;
@@ -34558,7 +34919,7 @@ def V6_vdealb : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32),
"$Vd32.b = vdeal($Vu32.b)",
-CVI_VP, TypeCVI_VP>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_e6299d16, TypeCVI_VP>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000000;
@@ -34570,7 +34931,7 @@ def V6_vdealb4w : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.b = vdeale($Vu32.b,$Vv32.b)",
-CVI_VP, TypeCVI_VP>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_f3fc3f83, TypeCVI_VP>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111001;
@@ -34582,7 +34943,7 @@ def V6_vdealb4w_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.b = vdeale($Vu32.b,$Vv32.b)",
-CVI_VP, TypeCVI_VP>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_f3fc3f83, TypeCVI_VP>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111001;
@@ -34618,7 +34979,7 @@ def V6_vdealb_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32),
"$Vd32.b = vdeal($Vu32.b)",
-CVI_VP, TypeCVI_VP>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_e6299d16, TypeCVI_VP>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000000;
@@ -34654,7 +35015,7 @@ def V6_vdealh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32),
"$Vd32.h = vdeal($Vu32.h)",
-CVI_VP, TypeCVI_VP>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_e6299d16, TypeCVI_VP>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000000;
@@ -34666,7 +35027,7 @@ def V6_vdealh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32),
"$Vd32.h = vdeal($Vu32.h)",
-CVI_VP, TypeCVI_VP>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_e6299d16, TypeCVI_VP>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000000;
@@ -34702,7 +35063,7 @@ def V6_vdealvdd : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8),
"$Vdd32 = vdeal($Vu32,$Vv32,$Rt8)",
-CVI_VP_VS_LONG, TypeCVI_VP_VS>, Enc_14767681, Requires<[HasV60T,UseHVX]> {
+tc_4e2a5159, TypeCVI_VP_VS>, Enc_24a7dc, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b1;
let Inst{31-24} = 0b00011011;
@@ -34714,7 +35075,7 @@ def V6_vdealvdd_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, IntRegsLow8:$Rt8),
"$Vdd32 = vdeal($Vu32,$Vv32,$Rt8)",
-CVI_VP_VS_LONG, TypeCVI_VP_VS>, Enc_14767681, Requires<[HasV60T,UseHVX]> {
+tc_4e2a5159, TypeCVI_VP_VS>, Enc_24a7dc, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b1;
let Inst{31-24} = 0b00011011;
@@ -34727,7 +35088,7 @@ def V6_vdelta : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32 = vdelta($Vu32,$Vv32)",
-CVI_VP, TypeCVI_VP>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_f3fc3f83, TypeCVI_VP>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111001;
@@ -34739,7 +35100,7 @@ def V6_vdelta_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32 = vdelta($Vu32,$Vv32)",
-CVI_VP, TypeCVI_VP>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_f3fc3f83, TypeCVI_VP>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111001;
@@ -34752,7 +35113,7 @@ def V6_vdmpybus : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vd32.h = vdmpy($Vu32.ub,$Rt32.b)",
-CVI_VX, TypeCVI_VX>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_69b6dd20, TypeCVI_VX>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001000;
@@ -34764,7 +35125,7 @@ def V6_vdmpybus_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vd32.h = vdmpy($Vu32.ub,$Rt32.b)",
-CVI_VX, TypeCVI_VX>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_69b6dd20, TypeCVI_VX>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001000;
@@ -34777,7 +35138,7 @@ def V6_vdmpybus_acc : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vx32.h += vdmpy($Vu32.ub,$Rt32.b)",
-CVI_VX, TypeCVI_VX>, Enc_10058269, Requires<[HasV60T,UseHVX]> {
+tc_d725e5b0, TypeCVI_VX>, Enc_5138b3, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001000;
@@ -34791,7 +35152,7 @@ def V6_vdmpybus_acc_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vx32.h += vdmpy($Vu32.ub,$Rt32.b)",
-CVI_VX, TypeCVI_VX>, Enc_10058269, Requires<[HasV60T,UseHVX]> {
+tc_d725e5b0, TypeCVI_VX>, Enc_5138b3, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001000;
@@ -34856,7 +35217,7 @@ def V6_vdmpybus_dv : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, IntRegs:$Rt32),
"$Vdd32.h = vdmpy($Vuu32.ub,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5023792, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_aad80c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001000;
@@ -34868,7 +35229,7 @@ def V6_vdmpybus_dv_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, IntRegs:$Rt32),
"$Vdd32.h = vdmpy($Vuu32.ub,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5023792, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_aad80c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001000;
@@ -34881,7 +35242,7 @@ def V6_vdmpybus_dv_acc : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VecDblRegs:$Vuu32, IntRegs:$Rt32),
"$Vxx32.h += vdmpy($Vuu32.ub,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_4327792, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_d6990d, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001000;
@@ -34895,7 +35256,7 @@ def V6_vdmpybus_dv_acc_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VecDblRegs128B:$Vuu32, IntRegs:$Rt32),
"$Vxx32.h += vdmpy($Vuu32.ub,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_4327792, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_d6990d, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001000;
@@ -34960,7 +35321,7 @@ def V6_vdmpyhb : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vd32.w = vdmpy($Vu32.h,$Rt32.b)",
-CVI_VX, TypeCVI_VX>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_69b6dd20, TypeCVI_VX>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001000;
@@ -34972,7 +35333,7 @@ def V6_vdmpyhb_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vd32.w = vdmpy($Vu32.h,$Rt32.b)",
-CVI_VX, TypeCVI_VX>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_69b6dd20, TypeCVI_VX>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001000;
@@ -34985,7 +35346,7 @@ def V6_vdmpyhb_acc : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vx32.w += vdmpy($Vu32.h,$Rt32.b)",
-CVI_VX, TypeCVI_VX>, Enc_10058269, Requires<[HasV60T,UseHVX]> {
+tc_d725e5b0, TypeCVI_VX>, Enc_5138b3, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001000;
@@ -34999,7 +35360,7 @@ def V6_vdmpyhb_acc_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vx32.w += vdmpy($Vu32.h,$Rt32.b)",
-CVI_VX, TypeCVI_VX>, Enc_10058269, Requires<[HasV60T,UseHVX]> {
+tc_d725e5b0, TypeCVI_VX>, Enc_5138b3, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001000;
@@ -35064,7 +35425,7 @@ def V6_vdmpyhb_dv : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, IntRegs:$Rt32),
"$Vdd32.w = vdmpy($Vuu32.h,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5023792, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_aad80c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001001;
@@ -35076,7 +35437,7 @@ def V6_vdmpyhb_dv_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, IntRegs:$Rt32),
"$Vdd32.w = vdmpy($Vuu32.h,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5023792, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_aad80c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001001;
@@ -35089,7 +35450,7 @@ def V6_vdmpyhb_dv_acc : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VecDblRegs:$Vuu32, IntRegs:$Rt32),
"$Vxx32.w += vdmpy($Vuu32.h,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_4327792, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_d6990d, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001001;
@@ -35103,7 +35464,7 @@ def V6_vdmpyhb_dv_acc_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VecDblRegs128B:$Vuu32, IntRegs:$Rt32),
"$Vxx32.w += vdmpy($Vuu32.h,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_4327792, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_d6990d, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001001;
@@ -35168,7 +35529,7 @@ def V6_vdmpyhisat : HInst<
(outs VectorRegs:$Vd32),
(ins VecDblRegs:$Vuu32, IntRegs:$Rt32),
"$Vd32.w = vdmpy($Vuu32.h,$Rt32.h):sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_36641, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_0e41fa, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001001;
@@ -35180,7 +35541,7 @@ def V6_vdmpyhisat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VecDblRegs128B:$Vuu32, IntRegs:$Rt32),
"$Vd32.w = vdmpy($Vuu32.h,$Rt32.h):sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_36641, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_0e41fa, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001001;
@@ -35193,7 +35554,7 @@ def V6_vdmpyhisat_acc : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, VecDblRegs:$Vuu32, IntRegs:$Rt32),
"$Vx32.w += vdmpy($Vuu32.h,$Rt32.h):sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5890213, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_cc857d, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001001;
@@ -35207,7 +35568,7 @@ def V6_vdmpyhisat_acc_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, VecDblRegs128B:$Vuu32, IntRegs:$Rt32),
"$Vx32.w += vdmpy($Vuu32.h,$Rt32.h):sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5890213, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_cc857d, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001001;
@@ -35272,7 +35633,7 @@ def V6_vdmpyhsat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vd32.w = vdmpy($Vu32.h,$Rt32.h):sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001001;
@@ -35284,7 +35645,7 @@ def V6_vdmpyhsat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vd32.w = vdmpy($Vu32.h,$Rt32.h):sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001001;
@@ -35297,7 +35658,7 @@ def V6_vdmpyhsat_acc : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vx32.w += vdmpy($Vu32.h,$Rt32.h):sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_10058269, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_5138b3, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001001;
@@ -35311,7 +35672,7 @@ def V6_vdmpyhsat_acc_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vx32.w += vdmpy($Vu32.h,$Rt32.h):sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_10058269, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_5138b3, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001001;
@@ -35376,7 +35737,7 @@ def V6_vdmpyhsuisat : HInst<
(outs VectorRegs:$Vd32),
(ins VecDblRegs:$Vuu32, IntRegs:$Rt32),
"$Vd32.w = vdmpy($Vuu32.h,$Rt32.uh,#1):sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_36641, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_0e41fa, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001001;
@@ -35388,7 +35749,7 @@ def V6_vdmpyhsuisat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VecDblRegs128B:$Vuu32, IntRegs:$Rt32),
"$Vd32.w = vdmpy($Vuu32.h,$Rt32.uh,#1):sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_36641, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_0e41fa, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001001;
@@ -35401,7 +35762,7 @@ def V6_vdmpyhsuisat_acc : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, VecDblRegs:$Vuu32, IntRegs:$Rt32),
"$Vx32.w += vdmpy($Vuu32.h,$Rt32.uh,#1):sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5890213, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_cc857d, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001001;
@@ -35415,7 +35776,7 @@ def V6_vdmpyhsuisat_acc_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, VecDblRegs128B:$Vuu32, IntRegs:$Rt32),
"$Vx32.w += vdmpy($Vuu32.h,$Rt32.uh,#1):sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5890213, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_cc857d, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001001;
@@ -35480,7 +35841,7 @@ def V6_vdmpyhsusat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vd32.w = vdmpy($Vu32.h,$Rt32.uh):sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001001;
@@ -35492,7 +35853,7 @@ def V6_vdmpyhsusat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vd32.w = vdmpy($Vu32.h,$Rt32.uh):sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001001;
@@ -35505,7 +35866,7 @@ def V6_vdmpyhsusat_acc : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vx32.w += vdmpy($Vu32.h,$Rt32.uh):sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_10058269, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_5138b3, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001001;
@@ -35519,7 +35880,7 @@ def V6_vdmpyhsusat_acc_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vx32.w += vdmpy($Vu32.h,$Rt32.uh):sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_10058269, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_5138b3, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001001;
@@ -35584,7 +35945,7 @@ def V6_vdmpyhvsat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.w = vdmpy($Vu32.h,$Vv32.h):sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100000;
@@ -35596,7 +35957,7 @@ def V6_vdmpyhvsat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.w = vdmpy($Vu32.h,$Vv32.h):sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100000;
@@ -35609,7 +35970,7 @@ def V6_vdmpyhvsat_acc : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vx32.w += vdmpy($Vu32.h,$Vv32.h):sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_2328527, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_a7341a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100000;
@@ -35623,7 +35984,7 @@ def V6_vdmpyhvsat_acc_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vx32.w += vdmpy($Vu32.h,$Vv32.h):sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_2328527, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_a7341a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100000;
@@ -35688,7 +36049,7 @@ def V6_vdsaduh : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, IntRegs:$Rt32),
"$Vdd32.uw = vdsad($Vuu32.uh,$Rt32.uh)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5023792, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_aad80c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001000;
@@ -35700,7 +36061,7 @@ def V6_vdsaduh_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, IntRegs:$Rt32),
"$Vdd32.uw = vdsad($Vuu32.uh,$Rt32.uh)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5023792, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_aad80c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001000;
@@ -35713,7 +36074,7 @@ def V6_vdsaduh_acc : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VecDblRegs:$Vuu32, IntRegs:$Rt32),
"$Vxx32.uw += vdsad($Vuu32.uh,$Rt32.uh)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_4327792, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_d6990d, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001011;
@@ -35727,7 +36088,7 @@ def V6_vdsaduh_acc_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VecDblRegs128B:$Vuu32, IntRegs:$Rt32),
"$Vxx32.uw += vdsad($Vuu32.uh,$Rt32.uh)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_4327792, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_d6990d, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001011;
@@ -35792,7 +36153,7 @@ def V6_veqb : HInst<
(outs VecPredRegs:$Qd4),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qd4 = vcmp.eq($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_13983714, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_95441f, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111100;
@@ -35804,7 +36165,7 @@ def V6_veqb_128B : HInst<
(outs VecPredRegs128B:$Qd4),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qd4 = vcmp.eq($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_13983714, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_95441f, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111100;
@@ -35817,7 +36178,7 @@ def V6_veqb_and : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qx4 &= vcmp.eq($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -35830,7 +36191,7 @@ def V6_veqb_and_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qx4 &= vcmp.eq($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -35844,7 +36205,7 @@ def V6_veqb_or : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qx4 |= vcmp.eq($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b010000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -35858,7 +36219,7 @@ def V6_veqb_or_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qx4 |= vcmp.eq($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b010000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -35873,7 +36234,7 @@ def V6_veqb_xor : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qx4 ^= vcmp.eq($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b100000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -35886,7 +36247,7 @@ def V6_veqb_xor_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qx4 ^= vcmp.eq($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b100000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -35900,7 +36261,7 @@ def V6_veqh : HInst<
(outs VecPredRegs:$Qd4),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qd4 = vcmp.eq($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_13983714, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_95441f, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111100;
@@ -35912,7 +36273,7 @@ def V6_veqh_128B : HInst<
(outs VecPredRegs128B:$Qd4),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qd4 = vcmp.eq($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_13983714, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_95441f, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111100;
@@ -35925,7 +36286,7 @@ def V6_veqh_and : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qx4 &= vcmp.eq($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -35938,7 +36299,7 @@ def V6_veqh_and_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qx4 &= vcmp.eq($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -35952,7 +36313,7 @@ def V6_veqh_or : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qx4 |= vcmp.eq($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b010001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -35966,7 +36327,7 @@ def V6_veqh_or_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qx4 |= vcmp.eq($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b010001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -35981,7 +36342,7 @@ def V6_veqh_xor : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qx4 ^= vcmp.eq($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b100001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -35994,7 +36355,7 @@ def V6_veqh_xor_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qx4 ^= vcmp.eq($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b100001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36008,7 +36369,7 @@ def V6_veqw : HInst<
(outs VecPredRegs:$Qd4),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qd4 = vcmp.eq($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_13983714, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_95441f, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111100;
@@ -36020,7 +36381,7 @@ def V6_veqw_128B : HInst<
(outs VecPredRegs128B:$Qd4),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qd4 = vcmp.eq($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_13983714, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_95441f, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111100;
@@ -36033,7 +36394,7 @@ def V6_veqw_and : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qx4 &= vcmp.eq($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36046,7 +36407,7 @@ def V6_veqw_and_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qx4 &= vcmp.eq($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36060,7 +36421,7 @@ def V6_veqw_or : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qx4 |= vcmp.eq($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b010010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36074,7 +36435,7 @@ def V6_veqw_or_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qx4 |= vcmp.eq($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b010010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36089,7 +36450,7 @@ def V6_veqw_xor : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qx4 ^= vcmp.eq($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b100010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36102,7 +36463,7 @@ def V6_veqw_xor_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qx4 ^= vcmp.eq($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b100010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36116,7 +36477,7 @@ def V6_vgtb : HInst<
(outs VecPredRegs:$Qd4),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qd4 = vcmp.gt($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_13983714, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_95441f, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111100;
@@ -36128,7 +36489,7 @@ def V6_vgtb_128B : HInst<
(outs VecPredRegs128B:$Qd4),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qd4 = vcmp.gt($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_13983714, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_95441f, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111100;
@@ -36141,7 +36502,7 @@ def V6_vgtb_and : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qx4 &= vcmp.gt($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000100;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36154,7 +36515,7 @@ def V6_vgtb_and_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qx4 &= vcmp.gt($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000100;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36168,7 +36529,7 @@ def V6_vgtb_or : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qx4 |= vcmp.gt($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b010100;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36182,7 +36543,7 @@ def V6_vgtb_or_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qx4 |= vcmp.gt($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b010100;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36197,7 +36558,7 @@ def V6_vgtb_xor : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qx4 ^= vcmp.gt($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b100100;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36210,7 +36571,7 @@ def V6_vgtb_xor_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qx4 ^= vcmp.gt($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b100100;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36224,7 +36585,7 @@ def V6_vgth : HInst<
(outs VecPredRegs:$Qd4),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qd4 = vcmp.gt($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_13983714, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_95441f, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111100;
@@ -36236,7 +36597,7 @@ def V6_vgth_128B : HInst<
(outs VecPredRegs128B:$Qd4),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qd4 = vcmp.gt($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_13983714, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_95441f, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111100;
@@ -36249,7 +36610,7 @@ def V6_vgth_and : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qx4 &= vcmp.gt($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000101;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36262,7 +36623,7 @@ def V6_vgth_and_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qx4 &= vcmp.gt($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000101;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36276,7 +36637,7 @@ def V6_vgth_or : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qx4 |= vcmp.gt($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b010101;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36290,7 +36651,7 @@ def V6_vgth_or_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qx4 |= vcmp.gt($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b010101;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36305,7 +36666,7 @@ def V6_vgth_xor : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qx4 ^= vcmp.gt($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b100101;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36318,7 +36679,7 @@ def V6_vgth_xor_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qx4 ^= vcmp.gt($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b100101;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36332,7 +36693,7 @@ def V6_vgtub : HInst<
(outs VecPredRegs:$Qd4),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qd4 = vcmp.gt($Vu32.ub,$Vv32.ub)",
-CVI_VA, TypeCVI_VA>, Enc_13983714, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_95441f, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b001000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111100;
@@ -36344,7 +36705,7 @@ def V6_vgtub_128B : HInst<
(outs VecPredRegs128B:$Qd4),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qd4 = vcmp.gt($Vu32.ub,$Vv32.ub)",
-CVI_VA, TypeCVI_VA>, Enc_13983714, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_95441f, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b001000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111100;
@@ -36357,7 +36718,7 @@ def V6_vgtub_and : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qx4 &= vcmp.gt($Vu32.ub,$Vv32.ub)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b001000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36370,7 +36731,7 @@ def V6_vgtub_and_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qx4 &= vcmp.gt($Vu32.ub,$Vv32.ub)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b001000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36384,7 +36745,7 @@ def V6_vgtub_or : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qx4 |= vcmp.gt($Vu32.ub,$Vv32.ub)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b011000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36398,7 +36759,7 @@ def V6_vgtub_or_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qx4 |= vcmp.gt($Vu32.ub,$Vv32.ub)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b011000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36413,7 +36774,7 @@ def V6_vgtub_xor : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qx4 ^= vcmp.gt($Vu32.ub,$Vv32.ub)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b101000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36426,7 +36787,7 @@ def V6_vgtub_xor_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qx4 ^= vcmp.gt($Vu32.ub,$Vv32.ub)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b101000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36440,7 +36801,7 @@ def V6_vgtuh : HInst<
(outs VecPredRegs:$Qd4),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qd4 = vcmp.gt($Vu32.uh,$Vv32.uh)",
-CVI_VA, TypeCVI_VA>, Enc_13983714, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_95441f, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b001001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111100;
@@ -36452,7 +36813,7 @@ def V6_vgtuh_128B : HInst<
(outs VecPredRegs128B:$Qd4),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qd4 = vcmp.gt($Vu32.uh,$Vv32.uh)",
-CVI_VA, TypeCVI_VA>, Enc_13983714, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_95441f, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b001001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111100;
@@ -36465,7 +36826,7 @@ def V6_vgtuh_and : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qx4 &= vcmp.gt($Vu32.uh,$Vv32.uh)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b001001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36478,7 +36839,7 @@ def V6_vgtuh_and_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qx4 &= vcmp.gt($Vu32.uh,$Vv32.uh)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b001001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36492,7 +36853,7 @@ def V6_vgtuh_or : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qx4 |= vcmp.gt($Vu32.uh,$Vv32.uh)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b011001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36506,7 +36867,7 @@ def V6_vgtuh_or_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qx4 |= vcmp.gt($Vu32.uh,$Vv32.uh)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b011001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36521,7 +36882,7 @@ def V6_vgtuh_xor : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qx4 ^= vcmp.gt($Vu32.uh,$Vv32.uh)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b101001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36534,7 +36895,7 @@ def V6_vgtuh_xor_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qx4 ^= vcmp.gt($Vu32.uh,$Vv32.uh)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b101001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36548,7 +36909,7 @@ def V6_vgtuw : HInst<
(outs VecPredRegs:$Qd4),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qd4 = vcmp.gt($Vu32.uw,$Vv32.uw)",
-CVI_VA, TypeCVI_VA>, Enc_13983714, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_95441f, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b001010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111100;
@@ -36560,7 +36921,7 @@ def V6_vgtuw_128B : HInst<
(outs VecPredRegs128B:$Qd4),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qd4 = vcmp.gt($Vu32.uw,$Vv32.uw)",
-CVI_VA, TypeCVI_VA>, Enc_13983714, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_95441f, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b001010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111100;
@@ -36573,7 +36934,7 @@ def V6_vgtuw_and : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qx4 &= vcmp.gt($Vu32.uw,$Vv32.uw)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b001010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36586,7 +36947,7 @@ def V6_vgtuw_and_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qx4 &= vcmp.gt($Vu32.uw,$Vv32.uw)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b001010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36600,7 +36961,7 @@ def V6_vgtuw_or : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qx4 |= vcmp.gt($Vu32.uw,$Vv32.uw)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b011010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36614,7 +36975,7 @@ def V6_vgtuw_or_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qx4 |= vcmp.gt($Vu32.uw,$Vv32.uw)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b011010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36629,7 +36990,7 @@ def V6_vgtuw_xor : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qx4 ^= vcmp.gt($Vu32.uw,$Vv32.uw)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b101010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36642,7 +37003,7 @@ def V6_vgtuw_xor_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qx4 ^= vcmp.gt($Vu32.uw,$Vv32.uw)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b101010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36656,7 +37017,7 @@ def V6_vgtw : HInst<
(outs VecPredRegs:$Qd4),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qd4 = vcmp.gt($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_13983714, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_95441f, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111100;
@@ -36668,7 +37029,7 @@ def V6_vgtw_128B : HInst<
(outs VecPredRegs128B:$Qd4),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qd4 = vcmp.gt($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_13983714, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_95441f, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111100;
@@ -36681,7 +37042,7 @@ def V6_vgtw_and : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qx4 &= vcmp.gt($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000110;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36694,7 +37055,7 @@ def V6_vgtw_and_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qx4 &= vcmp.gt($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000110;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36708,7 +37069,7 @@ def V6_vgtw_or : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qx4 |= vcmp.gt($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b010110;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36722,7 +37083,7 @@ def V6_vgtw_or_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qx4 |= vcmp.gt($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b010110;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36737,7 +37098,7 @@ def V6_vgtw_xor : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qx4 ^= vcmp.gt($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b100110;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36750,7 +37111,7 @@ def V6_vgtw_xor_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qx4 ^= vcmp.gt($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b100110;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36764,7 +37125,7 @@ def V6_vhist : HInst<
(outs),
(ins),
"vhist",
-CVI_HIST, TypeCVI_HIST>, Enc_0, Requires<[HasV60T,UseHVX]> {
+tc_e5053c8f, TypeCVI_HIST>, Enc_e3b0c4, Requires<[HasV60T,UseHVX]> {
let Inst{13-0} = 0b10000010000000;
let Inst{31-16} = 0b0001111000000000;
let DecoderNamespace = "EXT_mmvec";
@@ -36773,7 +37134,7 @@ def V6_vhist_128B : HInst<
(outs),
(ins),
"vhist",
-CVI_HIST, TypeCVI_HIST>, Enc_0, Requires<[HasV60T,UseHVX]> {
+tc_e5053c8f, TypeCVI_HIST>, Enc_e3b0c4, Requires<[HasV60T,UseHVX]> {
let Inst{13-0} = 0b10000010000000;
let Inst{31-16} = 0b0001111000000000;
let DecoderNamespace = "EXT_mmvec";
@@ -36783,7 +37144,7 @@ def V6_vhistq : HInst<
(outs),
(ins VecPredRegs:$Qv4),
"vhist($Qv4)",
-CVI_HIST, TypeCVI_HIST>, Enc_4109168, Requires<[HasV60T,UseHVX]> {
+tc_cedf314b, TypeCVI_HIST>, Enc_217147, Requires<[HasV60T,UseHVX]> {
let Inst{13-0} = 0b10000010000000;
let Inst{21-16} = 0b000010;
let Inst{31-24} = 0b00011110;
@@ -36793,7 +37154,7 @@ def V6_vhistq_128B : HInst<
(outs),
(ins VecPredRegs128B:$Qv4),
"vhist($Qv4)",
-CVI_HIST, TypeCVI_HIST>, Enc_4109168, Requires<[HasV60T,UseHVX]> {
+tc_cedf314b, TypeCVI_HIST>, Enc_217147, Requires<[HasV60T,UseHVX]> {
let Inst{13-0} = 0b10000010000000;
let Inst{21-16} = 0b000010;
let Inst{31-24} = 0b00011110;
@@ -36804,7 +37165,7 @@ def V6_vinsertwr : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, IntRegs:$Rt32),
"$Vx32.w = vinsert($Rt32)",
-CVI_VX_LATE, TypeCVI_VX>, Enc_313333, Requires<[HasV60T,UseHVX]> {
+tc_e231aa4f, TypeCVI_VX_LATE>, Enc_569cfe, Requires<[HasV60T,UseHVX]> {
let Inst{13-5} = 0b100000001;
let Inst{31-21} = 0b00011001101;
let hasNewValue = 1;
@@ -36816,7 +37177,7 @@ def V6_vinsertwr_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, IntRegs:$Rt32),
"$Vx32.w = vinsert($Rt32)",
-CVI_VX_LATE, TypeCVI_VX>, Enc_313333, Requires<[HasV60T,UseHVX]> {
+tc_e231aa4f, TypeCVI_VX_LATE>, Enc_569cfe, Requires<[HasV60T,UseHVX]> {
let Inst{13-5} = 0b100000001;
let Inst{31-21} = 0b00011001101;
let hasNewValue = 1;
@@ -36829,7 +37190,7 @@ def V6_vlalignb : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8),
"$Vd32 = vlalign($Vu32,$Vv32,$Rt8)",
-CVI_VP_LONG, TypeCVI_VP>, Enc_11083408, Requires<[HasV60T,UseHVX]> {
+tc_c4b515c5, TypeCVI_VP>, Enc_a30110, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-24} = 0b00011011;
@@ -36841,7 +37202,7 @@ def V6_vlalignb_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, IntRegsLow8:$Rt8),
"$Vd32 = vlalign($Vu32,$Vv32,$Rt8)",
-CVI_VP_LONG, TypeCVI_VP>, Enc_11083408, Requires<[HasV60T,UseHVX]> {
+tc_c4b515c5, TypeCVI_VP>, Enc_a30110, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-24} = 0b00011011;
@@ -36854,7 +37215,7 @@ def V6_vlalignbi : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, u3_0Imm:$Ii),
"$Vd32 = vlalign($Vu32,$Vv32,#$Ii)",
-CVI_VP_LONG, TypeCVI_VP>, Enc_7171569, Requires<[HasV60T,UseHVX]> {
+tc_c4b515c5, TypeCVI_VP>, Enc_0b2e5b, Requires<[HasV60T,UseHVX]> {
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011110011;
let hasNewValue = 1;
@@ -36865,7 +37226,7 @@ def V6_vlalignbi_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, u3_0Imm:$Ii),
"$Vd32 = vlalign($Vu32,$Vv32,#$Ii)",
-CVI_VP_LONG, TypeCVI_VP>, Enc_7171569, Requires<[HasV60T,UseHVX]> {
+tc_c4b515c5, TypeCVI_VP>, Enc_0b2e5b, Requires<[HasV60T,UseHVX]> {
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011110011;
let hasNewValue = 1;
@@ -36877,7 +37238,7 @@ def V6_vlsrb : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vd32.ub = vlsr($Vu32.ub,$Rt32)",
-CVI_VS, TypeCVI_VS>, Enc_16214129, Requires<[HasV62T,UseHVX]> {
+tc_41f4b64e, TypeCVI_VS>, Enc_b087ac, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001100;
@@ -36889,7 +37250,7 @@ def V6_vlsrb_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vd32.ub = vlsr($Vu32.ub,$Rt32)",
-CVI_VS, TypeCVI_VS>, Enc_16214129, Requires<[HasV62T,UseHVX]> {
+tc_41f4b64e, TypeCVI_VS>, Enc_b087ac, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001100;
@@ -36902,7 +37263,7 @@ def V6_vlsrh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vd32.uh = vlsr($Vu32.uh,$Rt32)",
-CVI_VS, TypeCVI_VS>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_41f4b64e, TypeCVI_VS>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001100;
@@ -36914,7 +37275,7 @@ def V6_vlsrh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vd32.uh = vlsr($Vu32.uh,$Rt32)",
-CVI_VS, TypeCVI_VS>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_41f4b64e, TypeCVI_VS>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001100;
@@ -36950,7 +37311,7 @@ def V6_vlsrhv : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.h = vlsr($Vu32.h,$Vv32.h)",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111101;
@@ -36962,7 +37323,7 @@ def V6_vlsrhv_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.h = vlsr($Vu32.h,$Vv32.h)",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111101;
@@ -36998,7 +37359,7 @@ def V6_vlsrw : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vd32.uw = vlsr($Vu32.uw,$Rt32)",
-CVI_VS, TypeCVI_VS>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_41f4b64e, TypeCVI_VS>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001100;
@@ -37010,7 +37371,7 @@ def V6_vlsrw_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vd32.uw = vlsr($Vu32.uw,$Rt32)",
-CVI_VS, TypeCVI_VS>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_41f4b64e, TypeCVI_VS>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001100;
@@ -37046,7 +37407,7 @@ def V6_vlsrwv : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.w = vlsr($Vu32.w,$Vv32.w)",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111101;
@@ -37058,7 +37419,7 @@ def V6_vlsrwv_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.w = vlsr($Vu32.w,$Vv32.w)",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111101;
@@ -37094,7 +37455,7 @@ def V6_vlutvvb : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8),
"$Vd32.b = vlut32($Vu32.b,$Vv32.b,$Rt8)",
-CVI_VP_LONG, TypeCVI_VP>, Enc_11083408, Requires<[HasV60T,UseHVX]> {
+tc_c4b515c5, TypeCVI_VP>, Enc_a30110, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{31-24} = 0b00011011;
@@ -37106,7 +37467,7 @@ def V6_vlutvvb_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, IntRegsLow8:$Rt8),
"$Vd32.b = vlut32($Vu32.b,$Vv32.b,$Rt8)",
-CVI_VP_LONG, TypeCVI_VP>, Enc_11083408, Requires<[HasV60T,UseHVX]> {
+tc_c4b515c5, TypeCVI_VP>, Enc_a30110, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{31-24} = 0b00011011;
@@ -37119,7 +37480,7 @@ def V6_vlutvvb_nm : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8),
"$Vd32.b = vlut32($Vu32.b,$Vv32.b,$Rt8):nomatch",
-CVI_VP_LONG, TypeCVI_VP>, Enc_11083408, Requires<[HasV62T,UseHVX]> {
+tc_c4b515c5, TypeCVI_VP>, Enc_a30110, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-24} = 0b00011000;
@@ -37131,7 +37492,7 @@ def V6_vlutvvb_nm_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, IntRegsLow8:$Rt8),
"$Vd32.b = vlut32($Vu32.b,$Vv32.b,$Rt8):nomatch",
-CVI_VP_LONG, TypeCVI_VP>, Enc_11083408, Requires<[HasV62T,UseHVX]> {
+tc_c4b515c5, TypeCVI_VP>, Enc_a30110, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-24} = 0b00011000;
@@ -37144,7 +37505,7 @@ def V6_vlutvvb_oracc : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8),
"$Vx32.b |= vlut32($Vu32.b,$Vv32.b,$Rt8)",
-CVI_VP_VS_LONG, TypeCVI_VP_VS>, Enc_8877260, Requires<[HasV60T,UseHVX]> {
+tc_cbf6d1dc, TypeCVI_VP_VS>, Enc_245865, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b1;
let Inst{31-24} = 0b00011011;
@@ -37158,7 +37519,7 @@ def V6_vlutvvb_oracc_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, IntRegsLow8:$Rt8),
"$Vx32.b |= vlut32($Vu32.b,$Vv32.b,$Rt8)",
-CVI_VP_VS_LONG, TypeCVI_VP_VS>, Enc_8877260, Requires<[HasV60T,UseHVX]> {
+tc_cbf6d1dc, TypeCVI_VP_VS>, Enc_245865, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b1;
let Inst{31-24} = 0b00011011;
@@ -37173,7 +37534,7 @@ def V6_vlutvvb_oracci : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, VectorRegs:$Vu32, VectorRegs:$Vv32, u3_0Imm:$Ii),
"$Vx32.b |= vlut32($Vu32.b,$Vv32.b,#$Ii)",
-CVI_VP_VS_LONG, TypeCVI_VP_VS>, Enc_8280533, Requires<[HasV62T,UseHVX]> {
+tc_cbf6d1dc, TypeCVI_VP_VS>, Enc_cd4705, Requires<[HasV62T,UseHVX]> {
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100110;
let hasNewValue = 1;
@@ -37186,7 +37547,7 @@ def V6_vlutvvb_oracci_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, u3_0Imm:$Ii),
"$Vx32.b |= vlut32($Vu32.b,$Vv32.b,#$Ii)",
-CVI_VP_VS_LONG, TypeCVI_VP_VS>, Enc_8280533, Requires<[HasV62T,UseHVX]> {
+tc_cbf6d1dc, TypeCVI_VP_VS>, Enc_cd4705, Requires<[HasV62T,UseHVX]> {
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100110;
let hasNewValue = 1;
@@ -37200,7 +37561,7 @@ def V6_vlutvvbi : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, u3_0Imm:$Ii),
"$Vd32.b = vlut32($Vu32.b,$Vv32.b,#$Ii)",
-CVI_VP_LONG, TypeCVI_VP>, Enc_7171569, Requires<[HasV62T,UseHVX]> {
+tc_c4b515c5, TypeCVI_VP>, Enc_0b2e5b, Requires<[HasV62T,UseHVX]> {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011110001;
let hasNewValue = 1;
@@ -37211,7 +37572,7 @@ def V6_vlutvvbi_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, u3_0Imm:$Ii),
"$Vd32.b = vlut32($Vu32.b,$Vv32.b,#$Ii)",
-CVI_VP_LONG, TypeCVI_VP>, Enc_7171569, Requires<[HasV62T,UseHVX]> {
+tc_c4b515c5, TypeCVI_VP>, Enc_0b2e5b, Requires<[HasV62T,UseHVX]> {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011110001;
let hasNewValue = 1;
@@ -37223,7 +37584,7 @@ def V6_vlutvwh : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8),
"$Vdd32.h = vlut16($Vu32.b,$Vv32.h,$Rt8)",
-CVI_VP_VS_LONG, TypeCVI_VP_VS>, Enc_14767681, Requires<[HasV60T,UseHVX]> {
+tc_4e2a5159, TypeCVI_VP_VS>, Enc_24a7dc, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b1;
let Inst{31-24} = 0b00011011;
@@ -37235,7 +37596,7 @@ def V6_vlutvwh_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, IntRegsLow8:$Rt8),
"$Vdd32.h = vlut16($Vu32.b,$Vv32.h,$Rt8)",
-CVI_VP_VS_LONG, TypeCVI_VP_VS>, Enc_14767681, Requires<[HasV60T,UseHVX]> {
+tc_4e2a5159, TypeCVI_VP_VS>, Enc_24a7dc, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b1;
let Inst{31-24} = 0b00011011;
@@ -37248,7 +37609,7 @@ def V6_vlutvwh_nm : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8),
"$Vdd32.h = vlut16($Vu32.b,$Vv32.h,$Rt8):nomatch",
-CVI_VP_VS_LONG, TypeCVI_VP_VS>, Enc_14767681, Requires<[HasV62T,UseHVX]> {
+tc_4e2a5159, TypeCVI_VP_VS>, Enc_24a7dc, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-24} = 0b00011000;
@@ -37260,7 +37621,7 @@ def V6_vlutvwh_nm_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, IntRegsLow8:$Rt8),
"$Vdd32.h = vlut16($Vu32.b,$Vv32.h,$Rt8):nomatch",
-CVI_VP_VS_LONG, TypeCVI_VP_VS>, Enc_14767681, Requires<[HasV62T,UseHVX]> {
+tc_4e2a5159, TypeCVI_VP_VS>, Enc_24a7dc, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-24} = 0b00011000;
@@ -37273,7 +37634,7 @@ def V6_vlutvwh_oracc : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8),
"$Vxx32.h |= vlut16($Vu32.b,$Vv32.h,$Rt8)",
-CVI_VP_VS_LONG, TypeCVI_VP_VS>, Enc_16213761, Requires<[HasV60T,UseHVX]> {
+tc_cbf6d1dc, TypeCVI_VP_VS>, Enc_7b523d, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b1;
let Inst{31-24} = 0b00011011;
@@ -37287,7 +37648,7 @@ def V6_vlutvwh_oracc_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, IntRegsLow8:$Rt8),
"$Vxx32.h |= vlut16($Vu32.b,$Vv32.h,$Rt8)",
-CVI_VP_VS_LONG, TypeCVI_VP_VS>, Enc_16213761, Requires<[HasV60T,UseHVX]> {
+tc_cbf6d1dc, TypeCVI_VP_VS>, Enc_7b523d, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b1;
let Inst{31-24} = 0b00011011;
@@ -37302,7 +37663,7 @@ def V6_vlutvwh_oracci : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VectorRegs:$Vu32, VectorRegs:$Vv32, u3_0Imm:$Ii),
"$Vxx32.h |= vlut16($Vu32.b,$Vv32.h,#$Ii)",
-CVI_VP_VS_LONG, TypeCVI_VP_VS>, Enc_3457570, Requires<[HasV62T,UseHVX]> {
+tc_cbf6d1dc, TypeCVI_VP_VS>, Enc_1178da, Requires<[HasV62T,UseHVX]> {
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100111;
let hasNewValue = 1;
@@ -37315,7 +37676,7 @@ def V6_vlutvwh_oracci_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, u3_0Imm:$Ii),
"$Vxx32.h |= vlut16($Vu32.b,$Vv32.h,#$Ii)",
-CVI_VP_VS_LONG, TypeCVI_VP_VS>, Enc_3457570, Requires<[HasV62T,UseHVX]> {
+tc_cbf6d1dc, TypeCVI_VP_VS>, Enc_1178da, Requires<[HasV62T,UseHVX]> {
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100111;
let hasNewValue = 1;
@@ -37329,7 +37690,7 @@ def V6_vlutvwhi : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, u3_0Imm:$Ii),
"$Vdd32.h = vlut16($Vu32.b,$Vv32.h,#$Ii)",
-CVI_VP_VS_LONG, TypeCVI_VP_VS>, Enc_13261538, Requires<[HasV62T,UseHVX]> {
+tc_4e2a5159, TypeCVI_VP_VS>, Enc_4b39e4, Requires<[HasV62T,UseHVX]> {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011110011;
let hasNewValue = 1;
@@ -37340,7 +37701,7 @@ def V6_vlutvwhi_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, u3_0Imm:$Ii),
"$Vdd32.h = vlut16($Vu32.b,$Vv32.h,#$Ii)",
-CVI_VP_VS_LONG, TypeCVI_VP_VS>, Enc_13261538, Requires<[HasV62T,UseHVX]> {
+tc_4e2a5159, TypeCVI_VP_VS>, Enc_4b39e4, Requires<[HasV62T,UseHVX]> {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011110011;
let hasNewValue = 1;
@@ -37352,7 +37713,7 @@ def V6_vmaxb : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.b = vmax($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV62T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111001;
@@ -37364,7 +37725,7 @@ def V6_vmaxb_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.b = vmax($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV62T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111001;
@@ -37400,7 +37761,7 @@ def V6_vmaxh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.h = vmax($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111000;
@@ -37412,7 +37773,7 @@ def V6_vmaxh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.h = vmax($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111000;
@@ -37448,7 +37809,7 @@ def V6_vmaxub : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.ub = vmax($Vu32.ub,$Vv32.ub)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111000;
@@ -37460,7 +37821,7 @@ def V6_vmaxub_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.ub = vmax($Vu32.ub,$Vv32.ub)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111000;
@@ -37496,7 +37857,7 @@ def V6_vmaxuh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.uh = vmax($Vu32.uh,$Vv32.uh)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111000;
@@ -37508,7 +37869,7 @@ def V6_vmaxuh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.uh = vmax($Vu32.uh,$Vv32.uh)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111000;
@@ -37544,7 +37905,7 @@ def V6_vmaxw : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.w = vmax($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111001;
@@ -37556,7 +37917,7 @@ def V6_vmaxw_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.w = vmax($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111001;
@@ -37592,7 +37953,7 @@ def V6_vminb : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.b = vmin($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV62T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111001;
@@ -37604,7 +37965,7 @@ def V6_vminb_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.b = vmin($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV62T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111001;
@@ -37640,7 +38001,7 @@ def V6_vminh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.h = vmin($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111000;
@@ -37652,7 +38013,7 @@ def V6_vminh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.h = vmin($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111000;
@@ -37688,7 +38049,7 @@ def V6_vminub : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.ub = vmin($Vu32.ub,$Vv32.ub)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111000;
@@ -37700,7 +38061,7 @@ def V6_vminub_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.ub = vmin($Vu32.ub,$Vv32.ub)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111000;
@@ -37736,7 +38097,7 @@ def V6_vminuh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.uh = vmin($Vu32.uh,$Vv32.uh)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111000;
@@ -37748,7 +38109,7 @@ def V6_vminuh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.uh = vmin($Vu32.uh,$Vv32.uh)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111000;
@@ -37784,7 +38145,7 @@ def V6_vminw : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.w = vmin($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111000;
@@ -37796,7 +38157,7 @@ def V6_vminw_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.w = vmin($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111000;
@@ -37832,7 +38193,7 @@ def V6_vmpabus : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, IntRegs:$Rt32),
"$Vdd32.h = vmpa($Vuu32.ub,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5023792, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_aad80c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001001;
@@ -37844,7 +38205,7 @@ def V6_vmpabus_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, IntRegs:$Rt32),
"$Vdd32.h = vmpa($Vuu32.ub,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5023792, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_aad80c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001001;
@@ -37857,7 +38218,7 @@ def V6_vmpabus_acc : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VecDblRegs:$Vuu32, IntRegs:$Rt32),
"$Vxx32.h += vmpa($Vuu32.ub,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_4327792, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_d6990d, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001001;
@@ -37871,7 +38232,7 @@ def V6_vmpabus_acc_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VecDblRegs128B:$Vuu32, IntRegs:$Rt32),
"$Vxx32.h += vmpa($Vuu32.ub,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_4327792, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_d6990d, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001001;
@@ -37936,7 +38297,7 @@ def V6_vmpabusv : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, VecDblRegs:$Vvv32),
"$Vdd32.h = vmpa($Vuu32.ub,$Vvv32.b)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100001;
@@ -37948,7 +38309,7 @@ def V6_vmpabusv_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, VecDblRegs128B:$Vvv32),
"$Vdd32.h = vmpa($Vuu32.ub,$Vvv32.b)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100001;
@@ -37984,7 +38345,7 @@ def V6_vmpabuuv : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, VecDblRegs:$Vvv32),
"$Vdd32.h = vmpa($Vuu32.ub,$Vvv32.ub)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100111;
@@ -37996,7 +38357,7 @@ def V6_vmpabuuv_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, VecDblRegs128B:$Vvv32),
"$Vdd32.h = vmpa($Vuu32.ub,$Vvv32.ub)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100111;
@@ -38032,7 +38393,7 @@ def V6_vmpahb : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, IntRegs:$Rt32),
"$Vdd32.w = vmpa($Vuu32.h,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5023792, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_aad80c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001001;
@@ -38044,7 +38405,7 @@ def V6_vmpahb_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, IntRegs:$Rt32),
"$Vdd32.w = vmpa($Vuu32.h,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5023792, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_aad80c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001001;
@@ -38057,7 +38418,7 @@ def V6_vmpahb_acc : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VecDblRegs:$Vuu32, IntRegs:$Rt32),
"$Vxx32.w += vmpa($Vuu32.h,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_4327792, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_d6990d, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001001;
@@ -38071,7 +38432,7 @@ def V6_vmpahb_acc_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VecDblRegs128B:$Vuu32, IntRegs:$Rt32),
"$Vxx32.w += vmpa($Vuu32.h,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_4327792, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_d6990d, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001001;
@@ -38136,7 +38497,7 @@ def V6_vmpauhb : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, IntRegs:$Rt32),
"$Vdd32.w = vmpa($Vuu32.uh,$Rt32.b)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_5023792, Requires<[HasV62T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_aad80c, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001100;
@@ -38148,7 +38509,7 @@ def V6_vmpauhb_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, IntRegs:$Rt32),
"$Vdd32.w = vmpa($Vuu32.uh,$Rt32.b)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_5023792, Requires<[HasV62T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_aad80c, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001100;
@@ -38161,7 +38522,7 @@ def V6_vmpauhb_acc : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VecDblRegs:$Vuu32, IntRegs:$Rt32),
"$Vxx32.w += vmpa($Vuu32.uh,$Rt32.b)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_4327792, Requires<[HasV62T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_d6990d, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001100;
@@ -38175,7 +38536,7 @@ def V6_vmpauhb_acc_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VecDblRegs128B:$Vuu32, IntRegs:$Rt32),
"$Vxx32.w += vmpa($Vuu32.uh,$Rt32.b)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_4327792, Requires<[HasV62T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_d6990d, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001100;
@@ -38240,7 +38601,7 @@ def V6_vmpybus : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vdd32.h = vmpy($Vu32.ub,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_11471622, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_01d3d0, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001001;
@@ -38252,7 +38613,7 @@ def V6_vmpybus_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vdd32.h = vmpy($Vu32.ub,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_11471622, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_01d3d0, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001001;
@@ -38265,7 +38626,7 @@ def V6_vmpybus_acc : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vxx32.h += vmpy($Vu32.ub,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_2153798, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_5e8512, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001001;
@@ -38279,7 +38640,7 @@ def V6_vmpybus_acc_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vxx32.h += vmpy($Vu32.ub,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_2153798, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_5e8512, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001001;
@@ -38344,7 +38705,7 @@ def V6_vmpybusv : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vdd32.h = vmpy($Vu32.ub,$Vv32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100000;
@@ -38356,7 +38717,7 @@ def V6_vmpybusv_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vdd32.h = vmpy($Vu32.ub,$Vv32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100000;
@@ -38369,7 +38730,7 @@ def V6_vmpybusv_acc : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vxx32.h += vmpy($Vu32.ub,$Vv32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5972412, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_3fc427, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100000;
@@ -38383,7 +38744,7 @@ def V6_vmpybusv_acc_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vxx32.h += vmpy($Vu32.ub,$Vv32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5972412, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_3fc427, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100000;
@@ -38448,7 +38809,7 @@ def V6_vmpybv : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vdd32.h = vmpy($Vu32.b,$Vv32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100000;
@@ -38460,7 +38821,7 @@ def V6_vmpybv_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vdd32.h = vmpy($Vu32.b,$Vv32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100000;
@@ -38473,7 +38834,7 @@ def V6_vmpybv_acc : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vxx32.h += vmpy($Vu32.b,$Vv32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5972412, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_3fc427, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100000;
@@ -38487,7 +38848,7 @@ def V6_vmpybv_acc_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vxx32.h += vmpy($Vu32.b,$Vv32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5972412, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_3fc427, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100000;
@@ -38552,7 +38913,7 @@ def V6_vmpyewuh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.w = vmpye($Vu32.w,$Vv32.uh)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111111;
@@ -38564,7 +38925,7 @@ def V6_vmpyewuh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.w = vmpye($Vu32.w,$Vv32.uh)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111111;
@@ -38577,7 +38938,7 @@ def V6_vmpyewuh_64 : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vdd32 = vmpye($Vu32.w,$Vv32.uh)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_15290236, Requires<[HasV62T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_71bb9b, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011110101;
@@ -38589,7 +38950,7 @@ def V6_vmpyewuh_64_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vdd32 = vmpye($Vu32.w,$Vv32.uh)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_15290236, Requires<[HasV62T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_71bb9b, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011110101;
@@ -38625,7 +38986,7 @@ def V6_vmpyh : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vdd32.w = vmpy($Vu32.h,$Rt32.h)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_11471622, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_01d3d0, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001010;
@@ -38637,7 +38998,7 @@ def V6_vmpyh_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vdd32.w = vmpy($Vu32.h,$Rt32.h)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_11471622, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_01d3d0, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001010;
@@ -38673,7 +39034,7 @@ def V6_vmpyhsat_acc : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vxx32.w += vmpy($Vu32.h,$Rt32.h):sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_2153798, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_5e8512, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001010;
@@ -38687,7 +39048,7 @@ def V6_vmpyhsat_acc_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vxx32.w += vmpy($Vu32.h,$Rt32.h):sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_2153798, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_5e8512, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001010;
@@ -38729,7 +39090,7 @@ def V6_vmpyhsrs : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vd32.h = vmpy($Vu32.h,$Rt32.h):<<1:rnd:sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001010;
@@ -38741,7 +39102,7 @@ def V6_vmpyhsrs_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vd32.h = vmpy($Vu32.h,$Rt32.h):<<1:rnd:sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001010;
@@ -38777,7 +39138,7 @@ def V6_vmpyhss : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vd32.h = vmpy($Vu32.h,$Rt32.h):<<1:sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001010;
@@ -38789,7 +39150,7 @@ def V6_vmpyhss_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vd32.h = vmpy($Vu32.h,$Rt32.h):<<1:sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001010;
@@ -38825,7 +39186,7 @@ def V6_vmpyhus : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vdd32.w = vmpy($Vu32.h,$Vv32.uh)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100001;
@@ -38837,7 +39198,7 @@ def V6_vmpyhus_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vdd32.w = vmpy($Vu32.h,$Vv32.uh)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100001;
@@ -38850,7 +39211,7 @@ def V6_vmpyhus_acc : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vxx32.w += vmpy($Vu32.h,$Vv32.uh)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5972412, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_3fc427, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100001;
@@ -38864,7 +39225,7 @@ def V6_vmpyhus_acc_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vxx32.w += vmpy($Vu32.h,$Vv32.uh)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5972412, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_3fc427, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100001;
@@ -38929,7 +39290,7 @@ def V6_vmpyhv : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vdd32.w = vmpy($Vu32.h,$Vv32.h)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100000;
@@ -38941,7 +39302,7 @@ def V6_vmpyhv_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vdd32.w = vmpy($Vu32.h,$Vv32.h)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100000;
@@ -38954,7 +39315,7 @@ def V6_vmpyhv_acc : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vxx32.w += vmpy($Vu32.h,$Vv32.h)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5972412, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_3fc427, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100000;
@@ -38968,7 +39329,7 @@ def V6_vmpyhv_acc_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vxx32.w += vmpy($Vu32.h,$Vv32.h)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5972412, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_3fc427, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100000;
@@ -39033,7 +39394,7 @@ def V6_vmpyhvsrs : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.h = vmpy($Vu32.h,$Vv32.h):<<1:rnd:sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100001;
@@ -39045,7 +39406,7 @@ def V6_vmpyhvsrs_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.h = vmpy($Vu32.h,$Vv32.h):<<1:rnd:sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100001;
@@ -39081,7 +39442,7 @@ def V6_vmpyieoh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.w = vmpyieo($Vu32.h,$Vv32.h)",
-CVI_VX, TypeCVI_VX>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_908a4c8c, TypeCVI_VX>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111011;
@@ -39093,7 +39454,7 @@ def V6_vmpyieoh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.w = vmpyieo($Vu32.h,$Vv32.h)",
-CVI_VX, TypeCVI_VX>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_908a4c8c, TypeCVI_VX>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111011;
@@ -39106,7 +39467,7 @@ def V6_vmpyiewh_acc : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vx32.w += vmpyie($Vu32.w,$Vv32.h)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_2328527, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_a7341a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100010;
@@ -39120,7 +39481,7 @@ def V6_vmpyiewh_acc_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vx32.w += vmpyie($Vu32.w,$Vv32.h)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_2328527, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_a7341a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100010;
@@ -39162,7 +39523,7 @@ def V6_vmpyiewuh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.w = vmpyie($Vu32.w,$Vv32.uh)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111110;
@@ -39174,7 +39535,7 @@ def V6_vmpyiewuh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.w = vmpyie($Vu32.w,$Vv32.uh)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111110;
@@ -39187,7 +39548,7 @@ def V6_vmpyiewuh_acc : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vx32.w += vmpyie($Vu32.w,$Vv32.uh)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_2328527, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_a7341a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100001;
@@ -39201,7 +39562,7 @@ def V6_vmpyiewuh_acc_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vx32.w += vmpyie($Vu32.w,$Vv32.uh)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_2328527, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_a7341a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100001;
@@ -39266,7 +39627,7 @@ def V6_vmpyih : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.h = vmpyi($Vu32.h,$Vv32.h)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100001;
@@ -39278,7 +39639,7 @@ def V6_vmpyih_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.h = vmpyi($Vu32.h,$Vv32.h)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100001;
@@ -39291,7 +39652,7 @@ def V6_vmpyih_acc : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vx32.h += vmpyi($Vu32.h,$Vv32.h)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_2328527, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_a7341a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100001;
@@ -39305,7 +39666,7 @@ def V6_vmpyih_acc_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vx32.h += vmpyi($Vu32.h,$Vv32.h)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_2328527, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_a7341a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100001;
@@ -39370,7 +39731,7 @@ def V6_vmpyihb : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vd32.h = vmpyi($Vu32.h,$Rt32.b)",
-CVI_VX_LONG, TypeCVI_VX>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_69b6dd20, TypeCVI_VX>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001011;
@@ -39382,7 +39743,7 @@ def V6_vmpyihb_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vd32.h = vmpyi($Vu32.h,$Rt32.b)",
-CVI_VX_LONG, TypeCVI_VX>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_69b6dd20, TypeCVI_VX>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001011;
@@ -39395,7 +39756,7 @@ def V6_vmpyihb_acc : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vx32.h += vmpyi($Vu32.h,$Rt32.b)",
-CVI_VX, TypeCVI_VX>, Enc_10058269, Requires<[HasV60T,UseHVX]> {
+tc_d725e5b0, TypeCVI_VX>, Enc_5138b3, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001011;
@@ -39409,7 +39770,7 @@ def V6_vmpyihb_acc_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vx32.h += vmpyi($Vu32.h,$Rt32.b)",
-CVI_VX, TypeCVI_VX>, Enc_10058269, Requires<[HasV60T,UseHVX]> {
+tc_d725e5b0, TypeCVI_VX>, Enc_5138b3, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001011;
@@ -39474,7 +39835,7 @@ def V6_vmpyiowh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.w = vmpyio($Vu32.w,$Vv32.h)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111110;
@@ -39486,7 +39847,7 @@ def V6_vmpyiowh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.w = vmpyio($Vu32.w,$Vv32.h)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111110;
@@ -39522,7 +39883,7 @@ def V6_vmpyiwb : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vd32.w = vmpyi($Vu32.w,$Rt32.b)",
-CVI_VX, TypeCVI_VX>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_69b6dd20, TypeCVI_VX>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001101;
@@ -39534,7 +39895,7 @@ def V6_vmpyiwb_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vd32.w = vmpyi($Vu32.w,$Rt32.b)",
-CVI_VX, TypeCVI_VX>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_69b6dd20, TypeCVI_VX>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001101;
@@ -39547,7 +39908,7 @@ def V6_vmpyiwb_acc : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vx32.w += vmpyi($Vu32.w,$Rt32.b)",
-CVI_VX, TypeCVI_VX>, Enc_10058269, Requires<[HasV60T,UseHVX]> {
+tc_d725e5b0, TypeCVI_VX>, Enc_5138b3, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001010;
@@ -39561,7 +39922,7 @@ def V6_vmpyiwb_acc_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vx32.w += vmpyi($Vu32.w,$Rt32.b)",
-CVI_VX, TypeCVI_VX>, Enc_10058269, Requires<[HasV60T,UseHVX]> {
+tc_d725e5b0, TypeCVI_VX>, Enc_5138b3, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001010;
@@ -39626,7 +39987,7 @@ def V6_vmpyiwh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vd32.w = vmpyi($Vu32.w,$Rt32.h)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001100;
@@ -39638,7 +39999,7 @@ def V6_vmpyiwh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vd32.w = vmpyi($Vu32.w,$Rt32.h)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001100;
@@ -39651,7 +40012,7 @@ def V6_vmpyiwh_acc : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vx32.w += vmpyi($Vu32.w,$Rt32.h)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_10058269, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_5138b3, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001010;
@@ -39665,7 +40026,7 @@ def V6_vmpyiwh_acc_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vx32.w += vmpyi($Vu32.w,$Rt32.h)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_10058269, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_5138b3, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001010;
@@ -39730,7 +40091,7 @@ def V6_vmpyiwub : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vd32.w = vmpyi($Vu32.w,$Rt32.ub)",
-CVI_VX_LONG, TypeCVI_VX>, Enc_16214129, Requires<[HasV62T,UseHVX]> {
+tc_69b6dd20, TypeCVI_VX>, Enc_b087ac, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001100;
@@ -39742,7 +40103,7 @@ def V6_vmpyiwub_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vd32.w = vmpyi($Vu32.w,$Rt32.ub)",
-CVI_VX_LONG, TypeCVI_VX>, Enc_16214129, Requires<[HasV62T,UseHVX]> {
+tc_69b6dd20, TypeCVI_VX>, Enc_b087ac, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001100;
@@ -39755,7 +40116,7 @@ def V6_vmpyiwub_acc : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vx32.w += vmpyi($Vu32.w,$Rt32.ub)",
-CVI_VX_LONG, TypeCVI_VX>, Enc_10058269, Requires<[HasV62T,UseHVX]> {
+tc_d725e5b0, TypeCVI_VX>, Enc_5138b3, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001100;
@@ -39769,7 +40130,7 @@ def V6_vmpyiwub_acc_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vx32.w += vmpyi($Vu32.w,$Rt32.ub)",
-CVI_VX_LONG, TypeCVI_VX>, Enc_10058269, Requires<[HasV62T,UseHVX]> {
+tc_d725e5b0, TypeCVI_VX>, Enc_5138b3, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001100;
@@ -39834,7 +40195,7 @@ def V6_vmpyowh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.w = vmpyo($Vu32.w,$Vv32.h):<<1:sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111111;
@@ -39846,7 +40207,7 @@ def V6_vmpyowh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.w = vmpyo($Vu32.w,$Vv32.h):<<1:sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111111;
@@ -39859,7 +40220,7 @@ def V6_vmpyowh_64_acc : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vxx32 += vmpyo($Vu32.w,$Vv32.h)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_5972412, Requires<[HasV62T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_3fc427, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100001;
@@ -39873,7 +40234,7 @@ def V6_vmpyowh_64_acc_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vxx32 += vmpyo($Vu32.w,$Vv32.h)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_5972412, Requires<[HasV62T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_3fc427, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100001;
@@ -39911,7 +40272,7 @@ def V6_vmpyowh_rnd : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.w = vmpyo($Vu32.w,$Vv32.h):<<1:rnd:sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111010;
@@ -39923,7 +40284,7 @@ def V6_vmpyowh_rnd_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.w = vmpyo($Vu32.w,$Vv32.h):<<1:rnd:sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111010;
@@ -39959,7 +40320,7 @@ def V6_vmpyowh_rnd_sacc : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vx32.w += vmpyo($Vu32.w,$Vv32.h):<<1:rnd:sat:shift",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_2328527, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_a7341a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100001;
@@ -39973,7 +40334,7 @@ def V6_vmpyowh_rnd_sacc_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vx32.w += vmpyo($Vu32.w,$Vv32.h):<<1:rnd:sat:shift",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_2328527, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_a7341a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100001;
@@ -40013,7 +40374,7 @@ def V6_vmpyowh_sacc : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vx32.w += vmpyo($Vu32.w,$Vv32.h):<<1:sat:shift",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_2328527, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_a7341a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100001;
@@ -40027,7 +40388,7 @@ def V6_vmpyowh_sacc_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vx32.w += vmpyo($Vu32.w,$Vv32.h):<<1:sat:shift",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_2328527, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_a7341a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100001;
@@ -40067,7 +40428,7 @@ def V6_vmpyub : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vdd32.uh = vmpy($Vu32.ub,$Rt32.ub)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_11471622, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_01d3d0, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001110;
@@ -40079,7 +40440,7 @@ def V6_vmpyub_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vdd32.uh = vmpy($Vu32.ub,$Rt32.ub)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_11471622, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_01d3d0, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001110;
@@ -40092,7 +40453,7 @@ def V6_vmpyub_acc : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vxx32.uh += vmpy($Vu32.ub,$Rt32.ub)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_2153798, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_5e8512, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001100;
@@ -40106,7 +40467,7 @@ def V6_vmpyub_acc_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vxx32.uh += vmpy($Vu32.ub,$Rt32.ub)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_2153798, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_5e8512, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001100;
@@ -40171,7 +40532,7 @@ def V6_vmpyubv : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vdd32.uh = vmpy($Vu32.ub,$Vv32.ub)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100000;
@@ -40183,7 +40544,7 @@ def V6_vmpyubv_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vdd32.uh = vmpy($Vu32.ub,$Vv32.ub)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100000;
@@ -40196,7 +40557,7 @@ def V6_vmpyubv_acc : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vxx32.uh += vmpy($Vu32.ub,$Vv32.ub)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5972412, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_3fc427, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100000;
@@ -40210,7 +40571,7 @@ def V6_vmpyubv_acc_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vxx32.uh += vmpy($Vu32.ub,$Vv32.ub)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5972412, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_3fc427, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100000;
@@ -40275,7 +40636,7 @@ def V6_vmpyuh : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vdd32.uw = vmpy($Vu32.uh,$Rt32.uh)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_11471622, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_01d3d0, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001010;
@@ -40287,7 +40648,7 @@ def V6_vmpyuh_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vdd32.uw = vmpy($Vu32.uh,$Rt32.uh)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_11471622, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_01d3d0, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001010;
@@ -40300,7 +40661,7 @@ def V6_vmpyuh_acc : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vxx32.uw += vmpy($Vu32.uh,$Rt32.uh)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_2153798, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_5e8512, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001010;
@@ -40314,7 +40675,7 @@ def V6_vmpyuh_acc_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vxx32.uw += vmpy($Vu32.uh,$Rt32.uh)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_2153798, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_5e8512, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001010;
@@ -40379,7 +40740,7 @@ def V6_vmpyuhv : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vdd32.uw = vmpy($Vu32.uh,$Vv32.uh)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100001;
@@ -40391,7 +40752,7 @@ def V6_vmpyuhv_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vdd32.uw = vmpy($Vu32.uh,$Vv32.uh)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100001;
@@ -40404,7 +40765,7 @@ def V6_vmpyuhv_acc : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vxx32.uw += vmpy($Vu32.uh,$Vv32.uh)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5972412, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_3fc427, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100001;
@@ -40418,7 +40779,7 @@ def V6_vmpyuhv_acc_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vxx32.uw += vmpy($Vu32.uh,$Vv32.uh)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5972412, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_3fc427, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100001;
@@ -40483,7 +40844,7 @@ def V6_vmux : HInst<
(outs VectorRegs:$Vd32),
(ins VecPredRegs:$Qt4, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32 = vmux($Qt4,$Vu32,$Vv32)",
-CVI_VA, TypeCVI_VA>, Enc_1572239, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_31db33, Requires<[HasV60T,UseHVX]> {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011110111;
@@ -40495,7 +40856,7 @@ def V6_vmux_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VecPredRegs128B:$Qt4, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32 = vmux($Qt4,$Vu32,$Vv32)",
-CVI_VA, TypeCVI_VA>, Enc_1572239, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_31db33, Requires<[HasV60T,UseHVX]> {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011110111;
@@ -40508,7 +40869,7 @@ def V6_vnavgh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.h = vnavg($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100111;
@@ -40520,7 +40881,7 @@ def V6_vnavgh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.h = vnavg($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100111;
@@ -40556,7 +40917,7 @@ def V6_vnavgub : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.b = vnavg($Vu32.ub,$Vv32.ub)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100111;
@@ -40568,7 +40929,7 @@ def V6_vnavgub_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.b = vnavg($Vu32.ub,$Vv32.ub)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100111;
@@ -40604,7 +40965,7 @@ def V6_vnavgw : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.w = vnavg($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100111;
@@ -40616,7 +40977,7 @@ def V6_vnavgw_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.w = vnavg($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100111;
@@ -40652,7 +41013,7 @@ def V6_vnccombine : HInst<
(outs VecDblRegs:$Vdd32),
(ins PredRegs:$Ps4, VectorRegs:$Vu32, VectorRegs:$Vv32),
"if (!$Ps4) $Vdd32 = vcombine($Vu32,$Vv32)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_16145290, Requires<[HasV60T,UseHVX]> {
+tc_2171ebae, TypeCVI_VA_DV>, Enc_8c2412, Requires<[HasV60T,UseHVX]> {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011010010;
@@ -40666,7 +41027,7 @@ def V6_vnccombine_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins PredRegs:$Ps4, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"if (!$Ps4) $Vdd32 = vcombine($Vu32,$Vv32)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_16145290, Requires<[HasV60T,UseHVX]> {
+tc_2171ebae, TypeCVI_VA_DV>, Enc_8c2412, Requires<[HasV60T,UseHVX]> {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011010010;
@@ -40681,7 +41042,7 @@ def V6_vncmov : HInst<
(outs VectorRegs:$Vd32),
(ins PredRegs:$Ps4, VectorRegs:$Vu32),
"if (!$Ps4) $Vd32 = $Vu32",
-CVI_VA, TypeCVI_VA>, Enc_12023037, Requires<[HasV60T,UseHVX]> {
+tc_b06ab583, TypeCVI_VA>, Enc_770858, Requires<[HasV60T,UseHVX]> {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001101000100000;
@@ -40695,7 +41056,7 @@ def V6_vncmov_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins PredRegs:$Ps4, VectorRegs128B:$Vu32),
"if (!$Ps4) $Vd32 = $Vu32",
-CVI_VA, TypeCVI_VA>, Enc_12023037, Requires<[HasV60T,UseHVX]> {
+tc_b06ab583, TypeCVI_VA>, Enc_770858, Requires<[HasV60T,UseHVX]> {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001101000100000;
@@ -40710,7 +41071,7 @@ def V6_vnormamth : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32),
"$Vd32.h = vnormamt($Vu32.h)",
-CVI_VS, TypeCVI_VS>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_d2cb81ea, TypeCVI_VS>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000011;
@@ -40722,7 +41083,7 @@ def V6_vnormamth_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32),
"$Vd32.h = vnormamt($Vu32.h)",
-CVI_VS, TypeCVI_VS>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_d2cb81ea, TypeCVI_VS>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000011;
@@ -40758,7 +41119,7 @@ def V6_vnormamtw : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32),
"$Vd32.w = vnormamt($Vu32.w)",
-CVI_VS, TypeCVI_VS>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_d2cb81ea, TypeCVI_VS>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000011;
@@ -40770,7 +41131,7 @@ def V6_vnormamtw_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32),
"$Vd32.w = vnormamt($Vu32.w)",
-CVI_VS, TypeCVI_VS>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_d2cb81ea, TypeCVI_VS>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000011;
@@ -40806,7 +41167,7 @@ def V6_vnot : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32),
"$Vd32 = vnot($Vu32)",
-CVI_VA, TypeCVI_VA>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_71337255, TypeCVI_VA>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000000;
@@ -40818,7 +41179,7 @@ def V6_vnot_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32),
"$Vd32 = vnot($Vu32)",
-CVI_VA, TypeCVI_VA>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_71337255, TypeCVI_VA>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000000;
@@ -40831,7 +41192,7 @@ def V6_vor : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32 = vor($Vu32,$Vv32)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100001;
@@ -40843,7 +41204,7 @@ def V6_vor_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32 = vor($Vu32,$Vv32)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100001;
@@ -40856,7 +41217,7 @@ def V6_vpackeb : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.b = vpacke($Vu32.h,$Vv32.h)",
-CVI_VP, TypeCVI_VP>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_f3fc3f83, TypeCVI_VP>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111110;
@@ -40868,7 +41229,7 @@ def V6_vpackeb_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.b = vpacke($Vu32.h,$Vv32.h)",
-CVI_VP, TypeCVI_VP>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_f3fc3f83, TypeCVI_VP>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111110;
@@ -40904,7 +41265,7 @@ def V6_vpackeh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.h = vpacke($Vu32.w,$Vv32.w)",
-CVI_VP, TypeCVI_VP>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_f3fc3f83, TypeCVI_VP>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111110;
@@ -40916,7 +41277,7 @@ def V6_vpackeh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.h = vpacke($Vu32.w,$Vv32.w)",
-CVI_VP, TypeCVI_VP>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_f3fc3f83, TypeCVI_VP>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111110;
@@ -40952,7 +41313,7 @@ def V6_vpackhb_sat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.b = vpack($Vu32.h,$Vv32.h):sat",
-CVI_VP, TypeCVI_VP>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_f3fc3f83, TypeCVI_VP>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111110;
@@ -40964,7 +41325,7 @@ def V6_vpackhb_sat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.b = vpack($Vu32.h,$Vv32.h):sat",
-CVI_VP, TypeCVI_VP>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_f3fc3f83, TypeCVI_VP>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111110;
@@ -41000,7 +41361,7 @@ def V6_vpackhub_sat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.ub = vpack($Vu32.h,$Vv32.h):sat",
-CVI_VP, TypeCVI_VP>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_f3fc3f83, TypeCVI_VP>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111110;
@@ -41012,7 +41373,7 @@ def V6_vpackhub_sat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.ub = vpack($Vu32.h,$Vv32.h):sat",
-CVI_VP, TypeCVI_VP>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_f3fc3f83, TypeCVI_VP>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111110;
@@ -41048,7 +41409,7 @@ def V6_vpackob : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.b = vpacko($Vu32.h,$Vv32.h)",
-CVI_VP, TypeCVI_VP>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_f3fc3f83, TypeCVI_VP>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111111;
@@ -41060,7 +41421,7 @@ def V6_vpackob_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.b = vpacko($Vu32.h,$Vv32.h)",
-CVI_VP, TypeCVI_VP>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_f3fc3f83, TypeCVI_VP>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111111;
@@ -41096,7 +41457,7 @@ def V6_vpackoh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.h = vpacko($Vu32.w,$Vv32.w)",
-CVI_VP, TypeCVI_VP>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_f3fc3f83, TypeCVI_VP>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111111;
@@ -41108,7 +41469,7 @@ def V6_vpackoh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.h = vpacko($Vu32.w,$Vv32.w)",
-CVI_VP, TypeCVI_VP>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_f3fc3f83, TypeCVI_VP>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111111;
@@ -41144,7 +41505,7 @@ def V6_vpackwh_sat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.h = vpack($Vu32.w,$Vv32.w):sat",
-CVI_VP, TypeCVI_VP>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_f3fc3f83, TypeCVI_VP>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111111;
@@ -41156,7 +41517,7 @@ def V6_vpackwh_sat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.h = vpack($Vu32.w,$Vv32.w):sat",
-CVI_VP, TypeCVI_VP>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_f3fc3f83, TypeCVI_VP>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111111;
@@ -41192,7 +41553,7 @@ def V6_vpackwuh_sat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.uh = vpack($Vu32.w,$Vv32.w):sat",
-CVI_VP, TypeCVI_VP>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_f3fc3f83, TypeCVI_VP>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111110;
@@ -41204,7 +41565,7 @@ def V6_vpackwuh_sat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.uh = vpack($Vu32.w,$Vv32.w):sat",
-CVI_VP, TypeCVI_VP>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_f3fc3f83, TypeCVI_VP>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111110;
@@ -41240,7 +41601,7 @@ def V6_vpopcounth : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32),
"$Vd32.h = vpopcount($Vu32.h)",
-CVI_VS, TypeCVI_VS>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_d2cb81ea, TypeCVI_VS>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000010;
@@ -41252,7 +41613,7 @@ def V6_vpopcounth_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32),
"$Vd32.h = vpopcount($Vu32.h)",
-CVI_VS, TypeCVI_VS>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_d2cb81ea, TypeCVI_VS>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000010;
@@ -41288,7 +41649,7 @@ def V6_vrdelta : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32 = vrdelta($Vu32,$Vv32)",
-CVI_VP, TypeCVI_VP>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_f3fc3f83, TypeCVI_VP>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111001;
@@ -41300,7 +41661,7 @@ def V6_vrdelta_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32 = vrdelta($Vu32,$Vv32)",
-CVI_VP, TypeCVI_VP>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_f3fc3f83, TypeCVI_VP>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111001;
@@ -41313,7 +41674,7 @@ def V6_vrmpybus : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vd32.w = vrmpy($Vu32.ub,$Rt32.b)",
-CVI_VX, TypeCVI_VX>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_69b6dd20, TypeCVI_VX>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001000;
@@ -41325,7 +41686,7 @@ def V6_vrmpybus_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vd32.w = vrmpy($Vu32.ub,$Rt32.b)",
-CVI_VX, TypeCVI_VX>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_69b6dd20, TypeCVI_VX>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001000;
@@ -41338,7 +41699,7 @@ def V6_vrmpybus_acc : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vx32.w += vrmpy($Vu32.ub,$Rt32.b)",
-CVI_VX, TypeCVI_VX>, Enc_10058269, Requires<[HasV60T,UseHVX]> {
+tc_d725e5b0, TypeCVI_VX>, Enc_5138b3, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001000;
@@ -41352,7 +41713,7 @@ def V6_vrmpybus_acc_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vx32.w += vrmpy($Vu32.ub,$Rt32.b)",
-CVI_VX, TypeCVI_VX>, Enc_10058269, Requires<[HasV60T,UseHVX]> {
+tc_d725e5b0, TypeCVI_VX>, Enc_5138b3, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001000;
@@ -41417,7 +41778,7 @@ def V6_vrmpybusi : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, IntRegs:$Rt32, u1_0Imm:$Ii),
"$Vdd32.w = vrmpy($Vuu32.ub,$Rt32.b,#$Ii)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_14172170, Requires<[HasV60T,UseHVX]> {
+tc_7e9f581b, TypeCVI_VX_DV>, Enc_2f2f04, Requires<[HasV60T,UseHVX]> {
let Inst{7-6} = 0b10;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001010;
@@ -41429,7 +41790,7 @@ def V6_vrmpybusi_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, IntRegs:$Rt32, u1_0Imm:$Ii),
"$Vdd32.w = vrmpy($Vuu32.ub,$Rt32.b,#$Ii)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_14172170, Requires<[HasV60T,UseHVX]> {
+tc_7e9f581b, TypeCVI_VX_DV>, Enc_2f2f04, Requires<[HasV60T,UseHVX]> {
let Inst{7-6} = 0b10;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001010;
@@ -41442,7 +41803,7 @@ def V6_vrmpybusi_acc : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VecDblRegs:$Vuu32, IntRegs:$Rt32, u1_0Imm:$Ii),
"$Vxx32.w += vrmpy($Vuu32.ub,$Rt32.b,#$Ii)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_13189194, Requires<[HasV60T,UseHVX]> {
+tc_41f99e1c, TypeCVI_VX_DV>, Enc_d483b9, Requires<[HasV60T,UseHVX]> {
let Inst{7-6} = 0b10;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001010;
@@ -41456,7 +41817,7 @@ def V6_vrmpybusi_acc_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VecDblRegs128B:$Vuu32, IntRegs:$Rt32, u1_0Imm:$Ii),
"$Vxx32.w += vrmpy($Vuu32.ub,$Rt32.b,#$Ii)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_13189194, Requires<[HasV60T,UseHVX]> {
+tc_41f99e1c, TypeCVI_VX_DV>, Enc_d483b9, Requires<[HasV60T,UseHVX]> {
let Inst{7-6} = 0b10;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001010;
@@ -41521,7 +41882,7 @@ def V6_vrmpybusv : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.w = vrmpy($Vu32.ub,$Vv32.b)",
-CVI_VX, TypeCVI_VX>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_908a4c8c, TypeCVI_VX>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100000;
@@ -41533,7 +41894,7 @@ def V6_vrmpybusv_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.w = vrmpy($Vu32.ub,$Vv32.b)",
-CVI_VX, TypeCVI_VX>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_908a4c8c, TypeCVI_VX>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100000;
@@ -41546,7 +41907,7 @@ def V6_vrmpybusv_acc : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vx32.w += vrmpy($Vu32.ub,$Vv32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_2328527, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_a7341a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100000;
@@ -41560,7 +41921,7 @@ def V6_vrmpybusv_acc_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vx32.w += vrmpy($Vu32.ub,$Vv32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_2328527, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_a7341a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100000;
@@ -41625,7 +41986,7 @@ def V6_vrmpybv : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.w = vrmpy($Vu32.b,$Vv32.b)",
-CVI_VX, TypeCVI_VX>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_908a4c8c, TypeCVI_VX>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100000;
@@ -41637,7 +41998,7 @@ def V6_vrmpybv_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.w = vrmpy($Vu32.b,$Vv32.b)",
-CVI_VX, TypeCVI_VX>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_908a4c8c, TypeCVI_VX>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100000;
@@ -41650,7 +42011,7 @@ def V6_vrmpybv_acc : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vx32.w += vrmpy($Vu32.b,$Vv32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_2328527, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_a7341a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100000;
@@ -41664,7 +42025,7 @@ def V6_vrmpybv_acc_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vx32.w += vrmpy($Vu32.b,$Vv32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_2328527, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_a7341a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100000;
@@ -41729,7 +42090,7 @@ def V6_vrmpyub : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vd32.uw = vrmpy($Vu32.ub,$Rt32.ub)",
-CVI_VX, TypeCVI_VX>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_69b6dd20, TypeCVI_VX>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001000;
@@ -41741,7 +42102,7 @@ def V6_vrmpyub_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vd32.uw = vrmpy($Vu32.ub,$Rt32.ub)",
-CVI_VX, TypeCVI_VX>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_69b6dd20, TypeCVI_VX>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001000;
@@ -41754,7 +42115,7 @@ def V6_vrmpyub_acc : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vx32.uw += vrmpy($Vu32.ub,$Rt32.ub)",
-CVI_VX, TypeCVI_VX>, Enc_10058269, Requires<[HasV60T,UseHVX]> {
+tc_d725e5b0, TypeCVI_VX>, Enc_5138b3, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001000;
@@ -41768,7 +42129,7 @@ def V6_vrmpyub_acc_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vx32.uw += vrmpy($Vu32.ub,$Rt32.ub)",
-CVI_VX, TypeCVI_VX>, Enc_10058269, Requires<[HasV60T,UseHVX]> {
+tc_d725e5b0, TypeCVI_VX>, Enc_5138b3, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001000;
@@ -41833,7 +42194,7 @@ def V6_vrmpyubi : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, IntRegs:$Rt32, u1_0Imm:$Ii),
"$Vdd32.uw = vrmpy($Vuu32.ub,$Rt32.ub,#$Ii)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_14172170, Requires<[HasV60T,UseHVX]> {
+tc_7e9f581b, TypeCVI_VX_DV>, Enc_2f2f04, Requires<[HasV60T,UseHVX]> {
let Inst{7-6} = 0b11;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001101;
@@ -41845,7 +42206,7 @@ def V6_vrmpyubi_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, IntRegs:$Rt32, u1_0Imm:$Ii),
"$Vdd32.uw = vrmpy($Vuu32.ub,$Rt32.ub,#$Ii)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_14172170, Requires<[HasV60T,UseHVX]> {
+tc_7e9f581b, TypeCVI_VX_DV>, Enc_2f2f04, Requires<[HasV60T,UseHVX]> {
let Inst{7-6} = 0b11;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001101;
@@ -41858,7 +42219,7 @@ def V6_vrmpyubi_acc : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VecDblRegs:$Vuu32, IntRegs:$Rt32, u1_0Imm:$Ii),
"$Vxx32.uw += vrmpy($Vuu32.ub,$Rt32.ub,#$Ii)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_13189194, Requires<[HasV60T,UseHVX]> {
+tc_41f99e1c, TypeCVI_VX_DV>, Enc_d483b9, Requires<[HasV60T,UseHVX]> {
let Inst{7-6} = 0b11;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001011;
@@ -41872,7 +42233,7 @@ def V6_vrmpyubi_acc_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VecDblRegs128B:$Vuu32, IntRegs:$Rt32, u1_0Imm:$Ii),
"$Vxx32.uw += vrmpy($Vuu32.ub,$Rt32.ub,#$Ii)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_13189194, Requires<[HasV60T,UseHVX]> {
+tc_41f99e1c, TypeCVI_VX_DV>, Enc_d483b9, Requires<[HasV60T,UseHVX]> {
let Inst{7-6} = 0b11;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001011;
@@ -41937,7 +42298,7 @@ def V6_vrmpyubv : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.uw = vrmpy($Vu32.ub,$Vv32.ub)",
-CVI_VX, TypeCVI_VX>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_908a4c8c, TypeCVI_VX>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100000;
@@ -41949,7 +42310,7 @@ def V6_vrmpyubv_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.uw = vrmpy($Vu32.ub,$Vv32.ub)",
-CVI_VX, TypeCVI_VX>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_908a4c8c, TypeCVI_VX>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100000;
@@ -41962,7 +42323,7 @@ def V6_vrmpyubv_acc : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vx32.uw += vrmpy($Vu32.ub,$Vv32.ub)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_2328527, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_a7341a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100000;
@@ -41976,7 +42337,7 @@ def V6_vrmpyubv_acc_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vx32.uw += vrmpy($Vu32.ub,$Vv32.ub)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_2328527, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_a7341a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100000;
@@ -42041,7 +42402,7 @@ def V6_vror : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vd32 = vror($Vu32,$Rt32)",
-CVI_VP, TypeCVI_VP>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_bf142ae2, TypeCVI_VP>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001011;
@@ -42053,7 +42414,7 @@ def V6_vror_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vd32 = vror($Vu32,$Rt32)",
-CVI_VP, TypeCVI_VP>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_bf142ae2, TypeCVI_VP>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001011;
@@ -42066,7 +42427,7 @@ def V6_vroundhb : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.b = vround($Vu32.h,$Vv32.h):sat",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111011;
@@ -42078,7 +42439,7 @@ def V6_vroundhb_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.b = vround($Vu32.h,$Vv32.h):sat",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111011;
@@ -42114,7 +42475,7 @@ def V6_vroundhub : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.ub = vround($Vu32.h,$Vv32.h):sat",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111011;
@@ -42126,7 +42487,7 @@ def V6_vroundhub_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.ub = vround($Vu32.h,$Vv32.h):sat",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111011;
@@ -42162,7 +42523,7 @@ def V6_vrounduhub : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.ub = vround($Vu32.uh,$Vv32.uh):sat",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV62T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111111;
@@ -42174,7 +42535,7 @@ def V6_vrounduhub_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.ub = vround($Vu32.uh,$Vv32.uh):sat",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV62T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111111;
@@ -42210,7 +42571,7 @@ def V6_vrounduwuh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.uh = vround($Vu32.uw,$Vv32.uw):sat",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV62T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111111;
@@ -42222,7 +42583,7 @@ def V6_vrounduwuh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.uh = vround($Vu32.uw,$Vv32.uw):sat",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV62T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111111;
@@ -42258,7 +42619,7 @@ def V6_vroundwh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.h = vround($Vu32.w,$Vv32.w):sat",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111011;
@@ -42270,7 +42631,7 @@ def V6_vroundwh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.h = vround($Vu32.w,$Vv32.w):sat",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111011;
@@ -42306,7 +42667,7 @@ def V6_vroundwuh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.uh = vround($Vu32.w,$Vv32.w):sat",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111011;
@@ -42318,7 +42679,7 @@ def V6_vroundwuh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.uh = vround($Vu32.w,$Vv32.w):sat",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111011;
@@ -42354,7 +42715,7 @@ def V6_vrsadubi : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, IntRegs:$Rt32, u1_0Imm:$Ii),
"$Vdd32.uw = vrsad($Vuu32.ub,$Rt32.ub,#$Ii)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_14172170, Requires<[HasV60T,UseHVX]> {
+tc_7e9f581b, TypeCVI_VX_DV>, Enc_2f2f04, Requires<[HasV60T,UseHVX]> {
let Inst{7-6} = 0b11;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001010;
@@ -42366,7 +42727,7 @@ def V6_vrsadubi_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, IntRegs:$Rt32, u1_0Imm:$Ii),
"$Vdd32.uw = vrsad($Vuu32.ub,$Rt32.ub,#$Ii)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_14172170, Requires<[HasV60T,UseHVX]> {
+tc_7e9f581b, TypeCVI_VX_DV>, Enc_2f2f04, Requires<[HasV60T,UseHVX]> {
let Inst{7-6} = 0b11;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001010;
@@ -42379,7 +42740,7 @@ def V6_vrsadubi_acc : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VecDblRegs:$Vuu32, IntRegs:$Rt32, u1_0Imm:$Ii),
"$Vxx32.uw += vrsad($Vuu32.ub,$Rt32.ub,#$Ii)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_13189194, Requires<[HasV60T,UseHVX]> {
+tc_41f99e1c, TypeCVI_VX_DV>, Enc_d483b9, Requires<[HasV60T,UseHVX]> {
let Inst{7-6} = 0b11;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001010;
@@ -42393,7 +42754,7 @@ def V6_vrsadubi_acc_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VecDblRegs128B:$Vuu32, IntRegs:$Rt32, u1_0Imm:$Ii),
"$Vxx32.uw += vrsad($Vuu32.ub,$Rt32.ub,#$Ii)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_13189194, Requires<[HasV60T,UseHVX]> {
+tc_41f99e1c, TypeCVI_VX_DV>, Enc_d483b9, Requires<[HasV60T,UseHVX]> {
let Inst{7-6} = 0b11;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001010;
@@ -42458,7 +42819,7 @@ def V6_vsathub : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.ub = vsat($Vu32.h,$Vv32.h)",
-CVI_VINLANESAT, TypeCVI_VINLANESAT>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_9b9642a1, TypeCVI_VINLANESAT>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111011;
@@ -42470,7 +42831,7 @@ def V6_vsathub_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.ub = vsat($Vu32.h,$Vv32.h)",
-CVI_VINLANESAT, TypeCVI_VINLANESAT>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_9b9642a1, TypeCVI_VINLANESAT>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111011;
@@ -42506,7 +42867,7 @@ def V6_vsatuwuh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.uh = vsat($Vu32.uw,$Vv32.uw)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV62T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111001;
@@ -42518,7 +42879,7 @@ def V6_vsatuwuh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.uh = vsat($Vu32.uw,$Vv32.uw)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV62T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111001;
@@ -42554,7 +42915,7 @@ def V6_vsatwh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.h = vsat($Vu32.w,$Vv32.w)",
-CVI_VINLANESAT, TypeCVI_VINLANESAT>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_9b9642a1, TypeCVI_VINLANESAT>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111011;
@@ -42566,7 +42927,7 @@ def V6_vsatwh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.h = vsat($Vu32.w,$Vv32.w)",
-CVI_VINLANESAT, TypeCVI_VINLANESAT>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_9b9642a1, TypeCVI_VINLANESAT>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111011;
@@ -42602,7 +42963,7 @@ def V6_vsb : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32),
"$Vdd32.h = vsxt($Vu32.b)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_14631806, Requires<[HasV60T,UseHVX]> {
+tc_644584f8, TypeCVI_VA_DV>, Enc_dd766a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000010;
@@ -42614,7 +42975,7 @@ def V6_vsb_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32),
"$Vdd32.h = vsxt($Vu32.b)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_14631806, Requires<[HasV60T,UseHVX]> {
+tc_644584f8, TypeCVI_VA_DV>, Enc_dd766a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000010;
@@ -42650,7 +43011,7 @@ def V6_vsh : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32),
"$Vdd32.w = vsxt($Vu32.h)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_14631806, Requires<[HasV60T,UseHVX]> {
+tc_644584f8, TypeCVI_VA_DV>, Enc_dd766a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000010;
@@ -42662,7 +43023,7 @@ def V6_vsh_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32),
"$Vdd32.w = vsxt($Vu32.h)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_14631806, Requires<[HasV60T,UseHVX]> {
+tc_644584f8, TypeCVI_VA_DV>, Enc_dd766a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000010;
@@ -42698,7 +43059,7 @@ def V6_vshufeh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.h = vshuffe($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111010;
@@ -42710,7 +43071,7 @@ def V6_vshufeh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.h = vshuffe($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111010;
@@ -42746,7 +43107,7 @@ def V6_vshuff : HInst<
(outs VectorRegs:$Vy32, VectorRegs:$Vx32),
(ins VectorRegs:$Vy32in, VectorRegs:$Vx32in, IntRegs:$Rt32),
"vshuff($Vy32,$Vx32,$Rt32)",
-CVI_VP_VS_LONG_EARLY, TypeCVI_VP_VS>, Enc_11422009, Requires<[HasV60T,UseHVX]> {
+tc_5c120602, TypeCVI_VP_VS>, Enc_989021, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001111;
@@ -42761,7 +43122,7 @@ def V6_vshuff_128B : HInst<
(outs VectorRegs128B:$Vy32, VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vy32in, VectorRegs128B:$Vx32in, IntRegs:$Rt32),
"vshuff($Vy32,$Vx32,$Rt32)",
-CVI_VP_VS_LONG_EARLY, TypeCVI_VP_VS>, Enc_11422009, Requires<[HasV60T,UseHVX]> {
+tc_5c120602, TypeCVI_VP_VS>, Enc_989021, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001111;
@@ -42777,7 +43138,7 @@ def V6_vshuffb : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32),
"$Vd32.b = vshuff($Vu32.b)",
-CVI_VP, TypeCVI_VP>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_e6299d16, TypeCVI_VP>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000010;
@@ -42789,7 +43150,7 @@ def V6_vshuffb_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32),
"$Vd32.b = vshuff($Vu32.b)",
-CVI_VP, TypeCVI_VP>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_e6299d16, TypeCVI_VP>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000010;
@@ -42825,7 +43186,7 @@ def V6_vshuffeb : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.b = vshuffe($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111010;
@@ -42837,7 +43198,7 @@ def V6_vshuffeb_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.b = vshuffe($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111010;
@@ -42873,7 +43234,7 @@ def V6_vshuffh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32),
"$Vd32.h = vshuff($Vu32.h)",
-CVI_VP, TypeCVI_VP>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_e6299d16, TypeCVI_VP>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000001;
@@ -42885,7 +43246,7 @@ def V6_vshuffh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32),
"$Vd32.h = vshuff($Vu32.h)",
-CVI_VP, TypeCVI_VP>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_e6299d16, TypeCVI_VP>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000001;
@@ -42921,7 +43282,7 @@ def V6_vshuffob : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.b = vshuffo($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111010;
@@ -42933,7 +43294,7 @@ def V6_vshuffob_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.b = vshuffo($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111010;
@@ -42969,7 +43330,7 @@ def V6_vshuffvdd : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8),
"$Vdd32 = vshuff($Vu32,$Vv32,$Rt8)",
-CVI_VP_VS_LONG, TypeCVI_VP_VS>, Enc_14767681, Requires<[HasV60T,UseHVX]> {
+tc_4e2a5159, TypeCVI_VP_VS>, Enc_24a7dc, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b1;
let Inst{31-24} = 0b00011011;
@@ -42981,7 +43342,7 @@ def V6_vshuffvdd_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, IntRegsLow8:$Rt8),
"$Vdd32 = vshuff($Vu32,$Vv32,$Rt8)",
-CVI_VP_VS_LONG, TypeCVI_VP_VS>, Enc_14767681, Requires<[HasV60T,UseHVX]> {
+tc_4e2a5159, TypeCVI_VP_VS>, Enc_24a7dc, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b1;
let Inst{31-24} = 0b00011011;
@@ -42994,7 +43355,7 @@ def V6_vshufoeb : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vdd32.b = vshuffoe($Vu32.b,$Vv32.b)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111010;
@@ -43006,7 +43367,7 @@ def V6_vshufoeb_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vdd32.b = vshuffoe($Vu32.b,$Vv32.b)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111010;
@@ -43042,7 +43403,7 @@ def V6_vshufoeh : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vdd32.h = vshuffoe($Vu32.h,$Vv32.h)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111010;
@@ -43054,7 +43415,7 @@ def V6_vshufoeh_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vdd32.h = vshuffoe($Vu32.h,$Vv32.h)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111010;
@@ -43090,7 +43451,7 @@ def V6_vshufoh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.h = vshuffo($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111010;
@@ -43102,7 +43463,7 @@ def V6_vshufoh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.h = vshuffo($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111010;
@@ -43138,7 +43499,7 @@ def V6_vsubb : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.b = vsub($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100010;
@@ -43150,7 +43511,7 @@ def V6_vsubb_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.b = vsub($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100010;
@@ -43186,7 +43547,7 @@ def V6_vsubb_dv : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, VecDblRegs:$Vvv32),
"$Vdd32.b = vsub($Vuu32.b,$Vvv32.b)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100100;
@@ -43198,7 +43559,7 @@ def V6_vsubb_dv_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, VecDblRegs128B:$Vvv32),
"$Vdd32.b = vsub($Vuu32.b,$Vvv32.b)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100100;
@@ -43234,7 +43595,7 @@ def V6_vsubbnq : HInst<
(outs VectorRegs:$Vx32),
(ins VecPredRegs:$Qv4, VectorRegs:$Vx32in, VectorRegs:$Vu32),
"if (!$Qv4) $Vx32.b -= $Vu32.b",
-CVI_VA, TypeCVI_VA>, Enc_12535811, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_a90628, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000010;
@@ -43248,7 +43609,7 @@ def V6_vsubbnq_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VecPredRegs128B:$Qv4, VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32),
"if (!$Qv4) $Vx32.b -= $Vu32.b",
-CVI_VA, TypeCVI_VA>, Enc_12535811, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_a90628, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000010;
@@ -43288,7 +43649,7 @@ def V6_vsubbq : HInst<
(outs VectorRegs:$Vx32),
(ins VecPredRegs:$Qv4, VectorRegs:$Vx32in, VectorRegs:$Vu32),
"if ($Qv4) $Vx32.b -= $Vu32.b",
-CVI_VA, TypeCVI_VA>, Enc_12535811, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_a90628, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000001;
@@ -43302,7 +43663,7 @@ def V6_vsubbq_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VecPredRegs128B:$Qv4, VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32),
"if ($Qv4) $Vx32.b -= $Vu32.b",
-CVI_VA, TypeCVI_VA>, Enc_12535811, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_a90628, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000001;
@@ -43342,7 +43703,7 @@ def V6_vsubbsat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.b = vsub($Vu32.b,$Vv32.b):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV62T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111001;
@@ -43354,7 +43715,7 @@ def V6_vsubbsat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.b = vsub($Vu32.b,$Vv32.b):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV62T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111001;
@@ -43390,7 +43751,7 @@ def V6_vsubbsat_dv : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, VecDblRegs:$Vvv32),
"$Vdd32.b = vsub($Vuu32.b,$Vvv32.b):sat",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV62T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011110101;
@@ -43402,7 +43763,7 @@ def V6_vsubbsat_dv_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, VecDblRegs128B:$Vvv32),
"$Vdd32.b = vsub($Vuu32.b,$Vvv32.b):sat",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV62T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011110101;
@@ -43438,7 +43799,7 @@ def V6_vsubcarry : HInst<
(outs VectorRegs:$Vd32, VecPredRegs:$Qx4),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, VecPredRegs:$Qx4in),
"$Vd32.w = vsub($Vu32.w,$Vv32.w,$Qx4):carry",
-CVI_VA, TypeCVI_VA>, Enc_13691337, Requires<[HasV62T,UseHVX]> {
+tc_5a9fc4ec, TypeCVI_VA>, Enc_b43b67, Requires<[HasV62T,UseHVX]> {
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100101;
@@ -43453,7 +43814,7 @@ def V6_vsubcarry_128B : HInst<
(outs VectorRegs128B:$Vd32, VecPredRegs128B:$Qx4),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, VecPredRegs128B:$Qx4in),
"$Vd32.w = vsub($Vu32.w,$Vv32.w,$Qx4):carry",
-CVI_VA, TypeCVI_VA>, Enc_13691337, Requires<[HasV62T,UseHVX]> {
+tc_5a9fc4ec, TypeCVI_VA>, Enc_b43b67, Requires<[HasV62T,UseHVX]> {
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100101;
@@ -43469,7 +43830,7 @@ def V6_vsubh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.h = vsub($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100010;
@@ -43481,7 +43842,7 @@ def V6_vsubh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.h = vsub($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100010;
@@ -43517,7 +43878,7 @@ def V6_vsubh_dv : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, VecDblRegs:$Vvv32),
"$Vdd32.h = vsub($Vuu32.h,$Vvv32.h)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100100;
@@ -43529,7 +43890,7 @@ def V6_vsubh_dv_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, VecDblRegs128B:$Vvv32),
"$Vdd32.h = vsub($Vuu32.h,$Vvv32.h)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100100;
@@ -43565,7 +43926,7 @@ def V6_vsubhnq : HInst<
(outs VectorRegs:$Vx32),
(ins VecPredRegs:$Qv4, VectorRegs:$Vx32in, VectorRegs:$Vu32),
"if (!$Qv4) $Vx32.h -= $Vu32.h",
-CVI_VA, TypeCVI_VA>, Enc_12535811, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_a90628, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000010;
@@ -43579,7 +43940,7 @@ def V6_vsubhnq_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VecPredRegs128B:$Qv4, VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32),
"if (!$Qv4) $Vx32.h -= $Vu32.h",
-CVI_VA, TypeCVI_VA>, Enc_12535811, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_a90628, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000010;
@@ -43619,7 +43980,7 @@ def V6_vsubhq : HInst<
(outs VectorRegs:$Vx32),
(ins VecPredRegs:$Qv4, VectorRegs:$Vx32in, VectorRegs:$Vu32),
"if ($Qv4) $Vx32.h -= $Vu32.h",
-CVI_VA, TypeCVI_VA>, Enc_12535811, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_a90628, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000001;
@@ -43633,7 +43994,7 @@ def V6_vsubhq_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VecPredRegs128B:$Qv4, VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32),
"if ($Qv4) $Vx32.h -= $Vu32.h",
-CVI_VA, TypeCVI_VA>, Enc_12535811, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_a90628, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000001;
@@ -43673,7 +44034,7 @@ def V6_vsubhsat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.h = vsub($Vu32.h,$Vv32.h):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100011;
@@ -43685,7 +44046,7 @@ def V6_vsubhsat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.h = vsub($Vu32.h,$Vv32.h):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100011;
@@ -43721,7 +44082,7 @@ def V6_vsubhsat_dv : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, VecDblRegs:$Vvv32),
"$Vdd32.h = vsub($Vuu32.h,$Vvv32.h):sat",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100101;
@@ -43733,7 +44094,7 @@ def V6_vsubhsat_dv_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, VecDblRegs128B:$Vvv32),
"$Vdd32.h = vsub($Vuu32.h,$Vvv32.h):sat",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100101;
@@ -43769,7 +44130,7 @@ def V6_vsubhw : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vdd32.w = vsub($Vu32.h,$Vv32.h)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100101;
@@ -43781,7 +44142,7 @@ def V6_vsubhw_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vdd32.w = vsub($Vu32.h,$Vv32.h)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100101;
@@ -43817,7 +44178,7 @@ def V6_vsububh : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vdd32.h = vsub($Vu32.ub,$Vv32.ub)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100101;
@@ -43829,7 +44190,7 @@ def V6_vsububh_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vdd32.h = vsub($Vu32.ub,$Vv32.ub)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100101;
@@ -43865,7 +44226,7 @@ def V6_vsububsat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.ub = vsub($Vu32.ub,$Vv32.ub):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100011;
@@ -43877,7 +44238,7 @@ def V6_vsububsat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.ub = vsub($Vu32.ub,$Vv32.ub):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100011;
@@ -43913,7 +44274,7 @@ def V6_vsububsat_dv : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, VecDblRegs:$Vvv32),
"$Vdd32.ub = vsub($Vuu32.ub,$Vvv32.ub):sat",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100100;
@@ -43925,7 +44286,7 @@ def V6_vsububsat_dv_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, VecDblRegs128B:$Vvv32),
"$Vdd32.ub = vsub($Vuu32.ub,$Vvv32.ub):sat",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100100;
@@ -43961,7 +44322,7 @@ def V6_vsubububb_sat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.ub = vsub($Vu32.ub,$Vv32.b):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV62T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011110101;
@@ -43973,7 +44334,7 @@ def V6_vsubububb_sat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.ub = vsub($Vu32.ub,$Vv32.b):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV62T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011110101;
@@ -43986,7 +44347,7 @@ def V6_vsubuhsat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.uh = vsub($Vu32.uh,$Vv32.uh):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100011;
@@ -43998,7 +44359,7 @@ def V6_vsubuhsat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.uh = vsub($Vu32.uh,$Vv32.uh):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100011;
@@ -44034,7 +44395,7 @@ def V6_vsubuhsat_dv : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, VecDblRegs:$Vvv32),
"$Vdd32.uh = vsub($Vuu32.uh,$Vvv32.uh):sat",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100100;
@@ -44046,7 +44407,7 @@ def V6_vsubuhsat_dv_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, VecDblRegs128B:$Vvv32),
"$Vdd32.uh = vsub($Vuu32.uh,$Vvv32.uh):sat",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100100;
@@ -44082,7 +44443,7 @@ def V6_vsubuhw : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vdd32.w = vsub($Vu32.uh,$Vv32.uh)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100101;
@@ -44094,7 +44455,7 @@ def V6_vsubuhw_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vdd32.w = vsub($Vu32.uh,$Vv32.uh)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100101;
@@ -44130,7 +44491,7 @@ def V6_vsubuwsat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.uw = vsub($Vu32.uw,$Vv32.uw):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV62T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111110;
@@ -44142,7 +44503,7 @@ def V6_vsubuwsat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.uw = vsub($Vu32.uw,$Vv32.uw):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV62T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111110;
@@ -44178,7 +44539,7 @@ def V6_vsubuwsat_dv : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, VecDblRegs:$Vvv32),
"$Vdd32.uw = vsub($Vuu32.uw,$Vvv32.uw):sat",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV62T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011110101;
@@ -44190,7 +44551,7 @@ def V6_vsubuwsat_dv_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, VecDblRegs128B:$Vvv32),
"$Vdd32.uw = vsub($Vuu32.uw,$Vvv32.uw):sat",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV62T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011110101;
@@ -44226,7 +44587,7 @@ def V6_vsubw : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.w = vsub($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100010;
@@ -44238,7 +44599,7 @@ def V6_vsubw_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.w = vsub($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100010;
@@ -44274,7 +44635,7 @@ def V6_vsubw_dv : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, VecDblRegs:$Vvv32),
"$Vdd32.w = vsub($Vuu32.w,$Vvv32.w)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100100;
@@ -44286,7 +44647,7 @@ def V6_vsubw_dv_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, VecDblRegs128B:$Vvv32),
"$Vdd32.w = vsub($Vuu32.w,$Vvv32.w)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100100;
@@ -44322,7 +44683,7 @@ def V6_vsubwnq : HInst<
(outs VectorRegs:$Vx32),
(ins VecPredRegs:$Qv4, VectorRegs:$Vx32in, VectorRegs:$Vu32),
"if (!$Qv4) $Vx32.w -= $Vu32.w",
-CVI_VA, TypeCVI_VA>, Enc_12535811, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_a90628, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000010;
@@ -44336,7 +44697,7 @@ def V6_vsubwnq_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VecPredRegs128B:$Qv4, VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32),
"if (!$Qv4) $Vx32.w -= $Vu32.w",
-CVI_VA, TypeCVI_VA>, Enc_12535811, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_a90628, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000010;
@@ -44376,7 +44737,7 @@ def V6_vsubwq : HInst<
(outs VectorRegs:$Vx32),
(ins VecPredRegs:$Qv4, VectorRegs:$Vx32in, VectorRegs:$Vu32),
"if ($Qv4) $Vx32.w -= $Vu32.w",
-CVI_VA, TypeCVI_VA>, Enc_12535811, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_a90628, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000010;
@@ -44390,7 +44751,7 @@ def V6_vsubwq_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VecPredRegs128B:$Qv4, VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32),
"if ($Qv4) $Vx32.w -= $Vu32.w",
-CVI_VA, TypeCVI_VA>, Enc_12535811, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_a90628, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000010;
@@ -44430,7 +44791,7 @@ def V6_vsubwsat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.w = vsub($Vu32.w,$Vv32.w):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100011;
@@ -44442,7 +44803,7 @@ def V6_vsubwsat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.w = vsub($Vu32.w,$Vv32.w):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100011;
@@ -44478,7 +44839,7 @@ def V6_vsubwsat_dv : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, VecDblRegs:$Vvv32),
"$Vdd32.w = vsub($Vuu32.w,$Vvv32.w):sat",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100101;
@@ -44490,7 +44851,7 @@ def V6_vsubwsat_dv_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, VecDblRegs128B:$Vvv32),
"$Vdd32.w = vsub($Vuu32.w,$Vvv32.w):sat",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100101;
@@ -44526,7 +44887,7 @@ def V6_vswap : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecPredRegs:$Qt4, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vdd32 = vswap($Qt4,$Vu32,$Vv32)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_11424254, Requires<[HasV60T,UseHVX]> {
+tc_316c637c, TypeCVI_VA_DV>, Enc_3dac0b, Requires<[HasV60T,UseHVX]> {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011110101;
@@ -44538,7 +44899,7 @@ def V6_vswap_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecPredRegs128B:$Qt4, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vdd32 = vswap($Qt4,$Vu32,$Vv32)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_11424254, Requires<[HasV60T,UseHVX]> {
+tc_316c637c, TypeCVI_VA_DV>, Enc_3dac0b, Requires<[HasV60T,UseHVX]> {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011110101;
@@ -44551,7 +44912,7 @@ def V6_vtmpyb : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, IntRegs:$Rt32),
"$Vdd32.h = vtmpy($Vuu32.b,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5023792, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_aad80c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001000;
@@ -44563,7 +44924,7 @@ def V6_vtmpyb_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, IntRegs:$Rt32),
"$Vdd32.h = vtmpy($Vuu32.b,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5023792, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_aad80c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001000;
@@ -44576,7 +44937,7 @@ def V6_vtmpyb_acc : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VecDblRegs:$Vuu32, IntRegs:$Rt32),
"$Vxx32.h += vtmpy($Vuu32.b,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_4327792, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_d6990d, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001000;
@@ -44590,7 +44951,7 @@ def V6_vtmpyb_acc_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VecDblRegs128B:$Vuu32, IntRegs:$Rt32),
"$Vxx32.h += vtmpy($Vuu32.b,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_4327792, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_d6990d, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001000;
@@ -44655,7 +45016,7 @@ def V6_vtmpybus : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, IntRegs:$Rt32),
"$Vdd32.h = vtmpy($Vuu32.ub,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5023792, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_aad80c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001000;
@@ -44667,7 +45028,7 @@ def V6_vtmpybus_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, IntRegs:$Rt32),
"$Vdd32.h = vtmpy($Vuu32.ub,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5023792, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_aad80c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001000;
@@ -44680,7 +45041,7 @@ def V6_vtmpybus_acc : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VecDblRegs:$Vuu32, IntRegs:$Rt32),
"$Vxx32.h += vtmpy($Vuu32.ub,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_4327792, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_d6990d, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001000;
@@ -44694,7 +45055,7 @@ def V6_vtmpybus_acc_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VecDblRegs128B:$Vuu32, IntRegs:$Rt32),
"$Vxx32.h += vtmpy($Vuu32.ub,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_4327792, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_d6990d, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001000;
@@ -44759,7 +45120,7 @@ def V6_vtmpyhb : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, IntRegs:$Rt32),
"$Vdd32.w = vtmpy($Vuu32.h,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5023792, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_aad80c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001101;
@@ -44771,7 +45132,7 @@ def V6_vtmpyhb_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, IntRegs:$Rt32),
"$Vdd32.w = vtmpy($Vuu32.h,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5023792, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_aad80c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001101;
@@ -44784,7 +45145,7 @@ def V6_vtmpyhb_acc : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VecDblRegs:$Vuu32, IntRegs:$Rt32),
"$Vxx32.w += vtmpy($Vuu32.h,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_4327792, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_d6990d, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001000;
@@ -44798,7 +45159,7 @@ def V6_vtmpyhb_acc_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VecDblRegs128B:$Vuu32, IntRegs:$Rt32),
"$Vxx32.w += vtmpy($Vuu32.h,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_4327792, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_d6990d, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001000;
@@ -44892,7 +45253,7 @@ def V6_vunpackb : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32),
"$Vdd32.h = vunpack($Vu32.b)",
-CVI_VP_VS, TypeCVI_VP_VS>, Enc_14631806, Requires<[HasV60T,UseHVX]> {
+tc_d7bea0ec, TypeCVI_VP_VS>, Enc_dd766a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000001;
@@ -44904,7 +45265,7 @@ def V6_vunpackb_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32),
"$Vdd32.h = vunpack($Vu32.b)",
-CVI_VP_VS, TypeCVI_VP_VS>, Enc_14631806, Requires<[HasV60T,UseHVX]> {
+tc_d7bea0ec, TypeCVI_VP_VS>, Enc_dd766a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000001;
@@ -44940,7 +45301,7 @@ def V6_vunpackh : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32),
"$Vdd32.w = vunpack($Vu32.h)",
-CVI_VP_VS, TypeCVI_VP_VS>, Enc_14631806, Requires<[HasV60T,UseHVX]> {
+tc_d7bea0ec, TypeCVI_VP_VS>, Enc_dd766a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000001;
@@ -44952,7 +45313,7 @@ def V6_vunpackh_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32),
"$Vdd32.w = vunpack($Vu32.h)",
-CVI_VP_VS, TypeCVI_VP_VS>, Enc_14631806, Requires<[HasV60T,UseHVX]> {
+tc_d7bea0ec, TypeCVI_VP_VS>, Enc_dd766a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000001;
@@ -44988,7 +45349,7 @@ def V6_vunpackob : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VectorRegs:$Vu32),
"$Vxx32.h |= vunpacko($Vu32.b)",
-CVI_VP_VS_LONG, TypeCVI_VP_VS>, Enc_12669374, Requires<[HasV60T,UseHVX]> {
+tc_72ad7b54, TypeCVI_VP_VS>, Enc_500cb0, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{31-16} = 0b0001111000000000;
@@ -45002,7 +45363,7 @@ def V6_vunpackob_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VectorRegs128B:$Vu32),
"$Vxx32.h |= vunpacko($Vu32.b)",
-CVI_VP_VS_LONG, TypeCVI_VP_VS>, Enc_12669374, Requires<[HasV60T,UseHVX]> {
+tc_72ad7b54, TypeCVI_VP_VS>, Enc_500cb0, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{31-16} = 0b0001111000000000;
@@ -45042,7 +45403,7 @@ def V6_vunpackoh : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VectorRegs:$Vu32),
"$Vxx32.w |= vunpacko($Vu32.h)",
-CVI_VP_VS_LONG, TypeCVI_VP_VS>, Enc_12669374, Requires<[HasV60T,UseHVX]> {
+tc_72ad7b54, TypeCVI_VP_VS>, Enc_500cb0, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{31-16} = 0b0001111000000000;
@@ -45056,7 +45417,7 @@ def V6_vunpackoh_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VectorRegs128B:$Vu32),
"$Vxx32.w |= vunpacko($Vu32.h)",
-CVI_VP_VS_LONG, TypeCVI_VP_VS>, Enc_12669374, Requires<[HasV60T,UseHVX]> {
+tc_72ad7b54, TypeCVI_VP_VS>, Enc_500cb0, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{31-16} = 0b0001111000000000;
@@ -45098,7 +45459,7 @@ def V6_vunpackub : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32),
"$Vdd32.uh = vunpack($Vu32.ub)",
-CVI_VP_VS, TypeCVI_VP_VS>, Enc_14631806, Requires<[HasV60T,UseHVX]> {
+tc_d7bea0ec, TypeCVI_VP_VS>, Enc_dd766a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000001;
@@ -45110,7 +45471,7 @@ def V6_vunpackub_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32),
"$Vdd32.uh = vunpack($Vu32.ub)",
-CVI_VP_VS, TypeCVI_VP_VS>, Enc_14631806, Requires<[HasV60T,UseHVX]> {
+tc_d7bea0ec, TypeCVI_VP_VS>, Enc_dd766a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000001;
@@ -45146,7 +45507,7 @@ def V6_vunpackuh : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32),
"$Vdd32.uw = vunpack($Vu32.uh)",
-CVI_VP_VS, TypeCVI_VP_VS>, Enc_14631806, Requires<[HasV60T,UseHVX]> {
+tc_d7bea0ec, TypeCVI_VP_VS>, Enc_dd766a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000001;
@@ -45158,7 +45519,7 @@ def V6_vunpackuh_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32),
"$Vdd32.uw = vunpack($Vu32.uh)",
-CVI_VP_VS, TypeCVI_VP_VS>, Enc_14631806, Requires<[HasV60T,UseHVX]> {
+tc_d7bea0ec, TypeCVI_VP_VS>, Enc_dd766a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000001;
@@ -45194,7 +45555,7 @@ def V6_vwhist128 : HInst<
(outs),
(ins),
"vwhist128",
-CVI_HIST, TypeCVI_HIST>, Enc_0, Requires<[HasV62T,UseHVX]> {
+tc_e5053c8f, TypeCVI_HIST>, Enc_e3b0c4, Requires<[HasV62T,UseHVX]> {
let Inst{13-0} = 0b10010010000000;
let Inst{31-16} = 0b0001111000000000;
let DecoderNamespace = "EXT_mmvec";
@@ -45203,7 +45564,7 @@ def V6_vwhist128_128B : HInst<
(outs),
(ins),
"vwhist128",
-CVI_HIST, TypeCVI_HIST>, Enc_0, Requires<[HasV62T,UseHVX]> {
+tc_e5053c8f, TypeCVI_HIST>, Enc_e3b0c4, Requires<[HasV62T,UseHVX]> {
let Inst{13-0} = 0b10010010000000;
let Inst{31-16} = 0b0001111000000000;
let DecoderNamespace = "EXT_mmvec";
@@ -45213,7 +45574,7 @@ def V6_vwhist128m : HInst<
(outs),
(ins u1_0Imm:$Ii),
"vwhist128(#$Ii)",
-CVI_HIST, TypeCVI_HIST>, Enc_1291652, Requires<[HasV62T,UseHVX]> {
+tc_b77635b4, TypeCVI_HIST>, Enc_efaed8, Requires<[HasV62T,UseHVX]> {
let Inst{7-0} = 0b10000000;
let Inst{13-9} = 0b10011;
let Inst{31-16} = 0b0001111000000000;
@@ -45223,7 +45584,7 @@ def V6_vwhist128m_128B : HInst<
(outs),
(ins u1_0Imm:$Ii),
"vwhist128(#$Ii)",
-CVI_HIST, TypeCVI_HIST>, Enc_1291652, Requires<[HasV62T,UseHVX]> {
+tc_b77635b4, TypeCVI_HIST>, Enc_efaed8, Requires<[HasV62T,UseHVX]> {
let Inst{7-0} = 0b10000000;
let Inst{13-9} = 0b10011;
let Inst{31-16} = 0b0001111000000000;
@@ -45234,7 +45595,7 @@ def V6_vwhist128q : HInst<
(outs),
(ins VecPredRegs:$Qv4),
"vwhist128($Qv4)",
-CVI_HIST, TypeCVI_HIST>, Enc_4109168, Requires<[HasV62T,UseHVX]> {
+tc_cedf314b, TypeCVI_HIST>, Enc_217147, Requires<[HasV62T,UseHVX]> {
let Inst{13-0} = 0b10010010000000;
let Inst{21-16} = 0b000010;
let Inst{31-24} = 0b00011110;
@@ -45244,7 +45605,7 @@ def V6_vwhist128q_128B : HInst<
(outs),
(ins VecPredRegs128B:$Qv4),
"vwhist128($Qv4)",
-CVI_HIST, TypeCVI_HIST>, Enc_4109168, Requires<[HasV62T,UseHVX]> {
+tc_cedf314b, TypeCVI_HIST>, Enc_217147, Requires<[HasV62T,UseHVX]> {
let Inst{13-0} = 0b10010010000000;
let Inst{21-16} = 0b000010;
let Inst{31-24} = 0b00011110;
@@ -45255,7 +45616,7 @@ def V6_vwhist128qm : HInst<
(outs),
(ins VecPredRegs:$Qv4, u1_0Imm:$Ii),
"vwhist128($Qv4,#$Ii)",
-CVI_HIST, TypeCVI_HIST>, Enc_7978128, Requires<[HasV62T,UseHVX]> {
+tc_28978789, TypeCVI_HIST>, Enc_802dc0, Requires<[HasV62T,UseHVX]> {
let Inst{7-0} = 0b10000000;
let Inst{13-9} = 0b10011;
let Inst{21-16} = 0b000010;
@@ -45266,7 +45627,7 @@ def V6_vwhist128qm_128B : HInst<
(outs),
(ins VecPredRegs128B:$Qv4, u1_0Imm:$Ii),
"vwhist128($Qv4,#$Ii)",
-CVI_HIST, TypeCVI_HIST>, Enc_7978128, Requires<[HasV62T,UseHVX]> {
+tc_28978789, TypeCVI_HIST>, Enc_802dc0, Requires<[HasV62T,UseHVX]> {
let Inst{7-0} = 0b10000000;
let Inst{13-9} = 0b10011;
let Inst{21-16} = 0b000010;
@@ -45278,7 +45639,7 @@ def V6_vwhist256 : HInst<
(outs),
(ins),
"vwhist256",
-CVI_HIST, TypeCVI_HIST>, Enc_0, Requires<[HasV62T,UseHVX]> {
+tc_e5053c8f, TypeCVI_HIST>, Enc_e3b0c4, Requires<[HasV62T,UseHVX]> {
let Inst{13-0} = 0b10001010000000;
let Inst{31-16} = 0b0001111000000000;
let DecoderNamespace = "EXT_mmvec";
@@ -45287,7 +45648,7 @@ def V6_vwhist256_128B : HInst<
(outs),
(ins),
"vwhist256",
-CVI_HIST, TypeCVI_HIST>, Enc_0, Requires<[HasV62T,UseHVX]> {
+tc_e5053c8f, TypeCVI_HIST>, Enc_e3b0c4, Requires<[HasV62T,UseHVX]> {
let Inst{13-0} = 0b10001010000000;
let Inst{31-16} = 0b0001111000000000;
let DecoderNamespace = "EXT_mmvec";
@@ -45297,7 +45658,7 @@ def V6_vwhist256_sat : HInst<
(outs),
(ins),
"vwhist256:sat",
-CVI_HIST, TypeCVI_HIST>, Enc_0, Requires<[HasV62T,UseHVX]> {
+tc_e5053c8f, TypeCVI_HIST>, Enc_e3b0c4, Requires<[HasV62T,UseHVX]> {
let Inst{13-0} = 0b10001110000000;
let Inst{31-16} = 0b0001111000000000;
let DecoderNamespace = "EXT_mmvec";
@@ -45306,7 +45667,7 @@ def V6_vwhist256_sat_128B : HInst<
(outs),
(ins),
"vwhist256:sat",
-CVI_HIST, TypeCVI_HIST>, Enc_0, Requires<[HasV62T,UseHVX]> {
+tc_e5053c8f, TypeCVI_HIST>, Enc_e3b0c4, Requires<[HasV62T,UseHVX]> {
let Inst{13-0} = 0b10001110000000;
let Inst{31-16} = 0b0001111000000000;
let DecoderNamespace = "EXT_mmvec";
@@ -45316,7 +45677,7 @@ def V6_vwhist256q : HInst<
(outs),
(ins VecPredRegs:$Qv4),
"vwhist256($Qv4)",
-CVI_HIST, TypeCVI_HIST>, Enc_4109168, Requires<[HasV62T,UseHVX]> {
+tc_cedf314b, TypeCVI_HIST>, Enc_217147, Requires<[HasV62T,UseHVX]> {
let Inst{13-0} = 0b10001010000000;
let Inst{21-16} = 0b000010;
let Inst{31-24} = 0b00011110;
@@ -45326,7 +45687,7 @@ def V6_vwhist256q_128B : HInst<
(outs),
(ins VecPredRegs128B:$Qv4),
"vwhist256($Qv4)",
-CVI_HIST, TypeCVI_HIST>, Enc_4109168, Requires<[HasV62T,UseHVX]> {
+tc_cedf314b, TypeCVI_HIST>, Enc_217147, Requires<[HasV62T,UseHVX]> {
let Inst{13-0} = 0b10001010000000;
let Inst{21-16} = 0b000010;
let Inst{31-24} = 0b00011110;
@@ -45337,7 +45698,7 @@ def V6_vwhist256q_sat : HInst<
(outs),
(ins VecPredRegs:$Qv4),
"vwhist256($Qv4):sat",
-CVI_HIST, TypeCVI_HIST>, Enc_4109168, Requires<[HasV62T,UseHVX]> {
+tc_cedf314b, TypeCVI_HIST>, Enc_217147, Requires<[HasV62T,UseHVX]> {
let Inst{13-0} = 0b10001110000000;
let Inst{21-16} = 0b000010;
let Inst{31-24} = 0b00011110;
@@ -45347,7 +45708,7 @@ def V6_vwhist256q_sat_128B : HInst<
(outs),
(ins VecPredRegs128B:$Qv4),
"vwhist256($Qv4):sat",
-CVI_HIST, TypeCVI_HIST>, Enc_4109168, Requires<[HasV62T,UseHVX]> {
+tc_cedf314b, TypeCVI_HIST>, Enc_217147, Requires<[HasV62T,UseHVX]> {
let Inst{13-0} = 0b10001110000000;
let Inst{21-16} = 0b000010;
let Inst{31-24} = 0b00011110;
@@ -45358,7 +45719,7 @@ def V6_vxor : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32 = vxor($Vu32,$Vv32)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100001;
@@ -45370,7 +45731,7 @@ def V6_vxor_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32 = vxor($Vu32,$Vv32)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100001;
@@ -45383,7 +45744,7 @@ def V6_vzb : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32),
"$Vdd32.uh = vzxt($Vu32.ub)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_14631806, Requires<[HasV60T,UseHVX]> {
+tc_644584f8, TypeCVI_VA_DV>, Enc_dd766a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000010;
@@ -45395,7 +45756,7 @@ def V6_vzb_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32),
"$Vdd32.uh = vzxt($Vu32.ub)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_14631806, Requires<[HasV60T,UseHVX]> {
+tc_644584f8, TypeCVI_VA_DV>, Enc_dd766a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000010;
@@ -45431,7 +45792,7 @@ def V6_vzh : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32),
"$Vdd32.uw = vzxt($Vu32.uh)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_14631806, Requires<[HasV60T,UseHVX]> {
+tc_644584f8, TypeCVI_VA_DV>, Enc_dd766a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000010;
@@ -45443,7 +45804,7 @@ def V6_vzh_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32),
"$Vdd32.uw = vzxt($Vu32.uh)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_14631806, Requires<[HasV60T,UseHVX]> {
+tc_644584f8, TypeCVI_VA_DV>, Enc_dd766a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000010;
@@ -45479,7 +45840,7 @@ def Y2_barrier : HInst<
(outs),
(ins),
"barrier",
-ST_tc_3stall_SLOT0, TypeST>, Enc_0 {
+tc_ef2676fd, TypeST>, Enc_e3b0c4 {
let Inst{13-0} = 0b00000000000000;
let Inst{31-16} = 0b1010100000000000;
let isSoloAX = 1;
@@ -45489,7 +45850,7 @@ def Y2_break : HInst<
(outs),
(ins),
"brkpt",
-CR_tc_3x_SLOT3, TypeCR>, Enc_0 {
+tc_bcf0e36e, TypeCR>, Enc_e3b0c4 {
let Inst{13-0} = 0b00000000000000;
let Inst{31-16} = 0b0110110000100000;
let isSolo = 1;
@@ -45498,7 +45859,7 @@ def Y2_dccleana : HInst<
(outs),
(ins IntRegs:$Rs32),
"dccleana($Rs32)",
-ST_tc_ld_SLOT0, TypeST>, Enc_11704059 {
+tc_30665cb0, TypeST>, Enc_ecbcc8 {
let Inst{13-0} = 0b00000000000000;
let Inst{31-21} = 0b10100000000;
let isSoloAin1 = 1;
@@ -45507,7 +45868,7 @@ def Y2_dccleaninva : HInst<
(outs),
(ins IntRegs:$Rs32),
"dccleaninva($Rs32)",
-ST_tc_ld_SLOT0, TypeST>, Enc_11704059 {
+tc_30665cb0, TypeST>, Enc_ecbcc8 {
let Inst{13-0} = 0b00000000000000;
let Inst{31-21} = 0b10100000010;
let isSoloAin1 = 1;
@@ -45516,7 +45877,7 @@ def Y2_dcfetch : HInst<
(outs),
(ins IntRegs:$Rs32),
"dcfetch($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_34e882a4, TypeMAPPING> {
let hasSideEffects = 1;
let isPseudo = 1;
let isCodeGenOnly = 1;
@@ -45525,7 +45886,7 @@ def Y2_dcfetchbo : HInst<
(outs),
(ins IntRegs:$Rs32, u11_3Imm:$Ii),
"dcfetch($Rs32+#$Ii)",
-LD_tc_ld_SLOT0, TypeLD>, Enc_4983213 {
+tc_ef0ebaaa, TypeLD>, Enc_2d829e {
let Inst{13-11} = 0b000;
let Inst{31-21} = 0b10010100000;
let addrMode = BaseImmOffset;
@@ -45535,7 +45896,7 @@ def Y2_dcinva : HInst<
(outs),
(ins IntRegs:$Rs32),
"dcinva($Rs32)",
-ST_tc_ld_SLOT0, TypeST>, Enc_11704059 {
+tc_30665cb0, TypeST>, Enc_ecbcc8 {
let Inst{13-0} = 0b00000000000000;
let Inst{31-21} = 0b10100000001;
let isSoloAin1 = 1;
@@ -45544,17 +45905,17 @@ def Y2_dczeroa : HInst<
(outs),
(ins IntRegs:$Rs32),
"dczeroa($Rs32)",
-ST_tc_ld_SLOT0, TypeST>, Enc_11704059 {
+tc_30665cb0, TypeST>, Enc_ecbcc8 {
let Inst{13-0} = 0b00000000000000;
let Inst{31-21} = 0b10100000110;
-let mayStore = 1;
let isSoloAin1 = 1;
+let mayStore = 1;
}
def Y2_icinva : HInst<
(outs),
(ins IntRegs:$Rs32),
"icinva($Rs32)",
-J_tc_2early_SLOT2, TypeJ>, Enc_11704059 {
+tc_049dfb74, TypeJ>, Enc_ecbcc8 {
let Inst{13-0} = 0b00000000000000;
let Inst{31-21} = 0b01010110110;
let isSolo = 1;
@@ -45563,7 +45924,7 @@ def Y2_isync : HInst<
(outs),
(ins),
"isync",
-J_tc_2early_SLOT2, TypeJ>, Enc_0 {
+tc_d267fa19, TypeJ>, Enc_e3b0c4 {
let Inst{13-0} = 0b00000000000010;
let Inst{31-16} = 0b0101011111000000;
let isSolo = 1;
@@ -45572,7 +45933,7 @@ def Y2_syncht : HInst<
(outs),
(ins),
"syncht",
-ST_tc_ld_SLOT0, TypeST>, Enc_0 {
+tc_ef2676fd, TypeST>, Enc_e3b0c4 {
let Inst{13-0} = 0b00000000000000;
let Inst{31-16} = 0b1010100001000000;
let isSolo = 1;
@@ -45581,7 +45942,7 @@ def Y4_l2fetch : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"l2fetch($Rs32,$Rt32)",
-ST_tc_3stall_SLOT0, TypeST>, Enc_14620934 {
+tc_f4608adc, TypeST>, Enc_ca3887 {
let Inst{7-0} = 0b00000000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10100110000;
@@ -45593,7 +45954,7 @@ def Y4_trace : HInst<
(outs),
(ins IntRegs:$Rs32),
"trace($Rs32)",
-CR_tc_2early_SLOT3, TypeCR>, Enc_11704059 {
+tc_4997da4a, TypeCR>, Enc_ecbcc8 {
let Inst{13-0} = 0b00000000000000;
let Inst{31-21} = 0b01100010010;
let isSoloAX = 1;
@@ -45602,7 +45963,7 @@ def Y5_l2fetch : HInst<
(outs),
(ins IntRegs:$Rs32, DoubleRegs:$Rtt32),
"l2fetch($Rs32,$Rtt32)",
-ST_tc_3stall_SLOT0, TypeST>, Enc_8943121, Requires<[HasV5T]> {
+tc_f4608adc, TypeST>, Enc_e6abcf, Requires<[HasV5T]> {
let Inst{7-0} = 0b00000000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10100110100;
@@ -45614,31 +45975,33 @@ def dep_A2_addsat : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = add($Rs32,$Rt32):sat:deprecated",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_14071773 {
+tc_47ab9233, TypeALU64>, Enc_5ab2be {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101100;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def dep_A2_subsat : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = sub($Rt32,$Rs32):sat:deprecated",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8605375 {
+tc_47ab9233, TypeALU64>, Enc_bd6011 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101100;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def dep_S2_packhl : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = packhl($Rs32,$Rt32):deprecated",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_1997594 {
+tc_9c18c9a5, TypeALU64>, Enc_be32a5 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010100000;
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonDepTimingClasses.h b/contrib/llvm/lib/Target/Hexagon/HexagonDepTimingClasses.h
new file mode 100644
index 000000000000..52963034543d
--- /dev/null
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonDepTimingClasses.h
@@ -0,0 +1,132 @@
+//===--- HexagonDepTimingClasses.h ----------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+static bool is_TC3x(unsigned SchedClass) {
+ switch (SchedClass) {
+ case Hexagon::Sched::tc_1000eb10:
+ case Hexagon::Sched::tc_2aaab1e0:
+ case Hexagon::Sched::tc_4997da4a:
+ case Hexagon::Sched::tc_5d806107:
+ case Hexagon::Sched::tc_6264c5e0:
+ case Hexagon::Sched::tc_69bb508b:
+ case Hexagon::Sched::tc_8c8041e6:
+ case Hexagon::Sched::tc_8cb685d9:
+ case Hexagon::Sched::tc_a12a5971:
+ case Hexagon::Sched::tc_ae0722f7:
+ case Hexagon::Sched::tc_ae2c2dc2:
+ case Hexagon::Sched::tc_bc5561d8:
+ case Hexagon::Sched::tc_d6a805a8:
+ case Hexagon::Sched::tc_f055fbb6:
+ case Hexagon::Sched::tc_feb4974b:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool is_TC2early(unsigned SchedClass) {
+ switch (SchedClass) {
+ case Hexagon::Sched::tc_35fb9d13:
+ case Hexagon::Sched::tc_cbe45117:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool is_TC4x(unsigned SchedClass) {
+ switch (SchedClass) {
+ case Hexagon::Sched::tc_09c86199:
+ case Hexagon::Sched::tc_2d1e6f5c:
+ case Hexagon::Sched::tc_2e55aa16:
+ case Hexagon::Sched::tc_3bea1824:
+ case Hexagon::Sched::tc_e836c161:
+ case Hexagon::Sched::tc_f1aa2cdb:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool is_TC2(unsigned SchedClass) {
+ switch (SchedClass) {
+ case Hexagon::Sched::tc_090485bb:
+ case Hexagon::Sched::tc_1fe8323c:
+ case Hexagon::Sched::tc_37326008:
+ case Hexagon::Sched::tc_3c10f809:
+ case Hexagon::Sched::tc_47ab9233:
+ case Hexagon::Sched::tc_485bb57c:
+ case Hexagon::Sched::tc_511f28f6:
+ case Hexagon::Sched::tc_583510c7:
+ case Hexagon::Sched::tc_63cd9d2d:
+ case Hexagon::Sched::tc_76c4c5ef:
+ case Hexagon::Sched::tc_7ca2ea10:
+ case Hexagon::Sched::tc_87601822:
+ case Hexagon::Sched::tc_88fa2da6:
+ case Hexagon::Sched::tc_94e6ffd9:
+ case Hexagon::Sched::tc_ab1b5e74:
+ case Hexagon::Sched::tc_b0f50e3c:
+ case Hexagon::Sched::tc_bd16579e:
+ case Hexagon::Sched::tc_c0cd91a8:
+ case Hexagon::Sched::tc_ca280e8b:
+ case Hexagon::Sched::tc_cd321066:
+ case Hexagon::Sched::tc_d95f4e98:
+ case Hexagon::Sched::tc_e17ce9ad:
+ case Hexagon::Sched::tc_f1240c08:
+ case Hexagon::Sched::tc_faab1248:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool is_TC1(unsigned SchedClass) {
+ switch (SchedClass) {
+ case Hexagon::Sched::tc_07ac815d:
+ case Hexagon::Sched::tc_1b6011fb:
+ case Hexagon::Sched::tc_1b834fe7:
+ case Hexagon::Sched::tc_1e062b18:
+ case Hexagon::Sched::tc_1f9668cc:
+ case Hexagon::Sched::tc_43068634:
+ case Hexagon::Sched::tc_47f0b7ad:
+ case Hexagon::Sched::tc_537e2013:
+ case Hexagon::Sched::tc_548f402d:
+ case Hexagon::Sched::tc_5fa2857c:
+ case Hexagon::Sched::tc_5fe9fcd0:
+ case Hexagon::Sched::tc_78b3c689:
+ case Hexagon::Sched::tc_7c2dcd4d:
+ case Hexagon::Sched::tc_81a23d44:
+ case Hexagon::Sched::tc_821c4233:
+ case Hexagon::Sched::tc_92d1833c:
+ case Hexagon::Sched::tc_9a13af9d:
+ case Hexagon::Sched::tc_9c18c9a5:
+ case Hexagon::Sched::tc_9df8b0dc:
+ case Hexagon::Sched::tc_9f518242:
+ case Hexagon::Sched::tc_a1fb80e1:
+ case Hexagon::Sched::tc_a333d2a9:
+ case Hexagon::Sched::tc_a87879e8:
+ case Hexagon::Sched::tc_aad55963:
+ case Hexagon::Sched::tc_b08b653e:
+ case Hexagon::Sched::tc_b324366f:
+ case Hexagon::Sched::tc_b5bfaa60:
+ case Hexagon::Sched::tc_b86c7e8b:
+ case Hexagon::Sched::tc_c58f771a:
+ case Hexagon::Sched::tc_d108a090:
+ case Hexagon::Sched::tc_d1b5a4b6:
+ case Hexagon::Sched::tc_d2609065:
+ case Hexagon::Sched::tc_d63b71d1:
+ case Hexagon::Sched::tc_e2c31426:
+ case Hexagon::Sched::tc_e8c7a357:
+ case Hexagon::Sched::tc_eb07ef6f:
+ case Hexagon::Sched::tc_f16d5b17:
+ return true;
+ default:
+ return false;
+ }
+}
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonIICHVX.td b/contrib/llvm/lib/Target/Hexagon/HexagonIICHVX.td
index 4081a225832b..1493d52f08e8 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonIICHVX.td
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonIICHVX.td
@@ -7,96 +7,12 @@
//
//===----------------------------------------------------------------------===//
-//
-// Though all these itinerary classes exist for V60 onwards, they are being
-// listed here as 'HVXV62Itin' because itinerary class description prior to V62
-// doesn't include operand cycle info. In future, I plan to merge them
-// together and call it 'HVXItin'.
-//
-class HVXV62Itin {
- list<InstrItinData> HVXV62Itin_list = [
- InstrItinData<COPROC_VMEM_vtc_long_SLOT01,
- [InstrStage<1, [SLOT0, SLOT1]>],
- [3, 1, 1, 1]>,
- InstrItinData<COPROC_VX_vtc_long_SLOT23,
- [InstrStage<1, [SLOT2, SLOT3]>],
- [3, 1, 1, 1]>,
- InstrItinData<COPROC_VX_vtc_SLOT23,
- [InstrStage<1, [SLOT2, SLOT3]>],
- [3, 1, 1, 1]>,
- InstrItinData<CVI_VA, [InstrStage<1, [SLOT0,SLOT1,SLOT2,SLOT3], 0>,
- InstrStage<1, [CVI_XLANE,CVI_SHIFT,
- CVI_MPY0, CVI_MPY1]>],
- [1, 1, 1, 1]>,
- InstrItinData<CVI_VA_DV, [InstrStage<1, [SLOT0,SLOT1,SLOT2,SLOT3], 0>,
- InstrStage<1, [CVI_XLSHF, CVI_MPY01]>],
- [1, 1, 1, 1]>,
- InstrItinData<CVI_VX_LONG, [InstrStage<1, [SLOT2, SLOT3], 0>,
- InstrStage<1, [CVI_MPY0, CVI_MPY1]>],
- [1, 1, 1, 1]>,
- InstrItinData<CVI_VX_LATE, [InstrStage<1, [SLOT2, SLOT3], 0>,
- InstrStage<1, [CVI_MPY0, CVI_MPY1]>],
- [1, 1, 1, 1]>,
- InstrItinData<CVI_VX, [InstrStage<1, [SLOT2, SLOT3], 0>,
- InstrStage<1, [CVI_MPY0, CVI_MPY1]>],
- [1, 1, 1, 1]>,
- InstrItinData<CVI_VX_DV_LONG, [InstrStage<1, [SLOT2, SLOT3], 0>,
- InstrStage<1, [CVI_MPY01]>], [1, 1, 1, 1]>,
- InstrItinData<CVI_VX_DV, [InstrStage<1, [SLOT2, SLOT3], 0>,
- InstrStage<1, [CVI_MPY01]>], [1, 1, 1, 1]>,
- InstrItinData<CVI_VX_DV_SLOT2, [InstrStage<1, [SLOT2], 0>,
- InstrStage<1, [CVI_MPY01]>], [1, 1, 1, 1]>,
- InstrItinData<CVI_VX_DV_SLOT2_LONG_EARLY,
- [InstrStage<1, [SLOT2], 0>,
- InstrStage<1, [CVI_MPY01]>], [1, 1, 1, 1]>,
- InstrItinData<CVI_VP, [InstrStage<1, [SLOT0,SLOT1,SLOT2,SLOT3], 0>,
- InstrStage<1, [CVI_XLANE]>], [1, 1, 1, 1]>,
- InstrItinData<CVI_VP_LONG, [InstrStage<1, [SLOT0,SLOT1,SLOT2,SLOT3], 0>,
- InstrStage<1, [CVI_XLANE]>], [1, 1, 1, 1]>,
- InstrItinData<CVI_VP_VS_EARLY, [InstrStage<1, [SLOT0,SLOT1,SLOT2,SLOT3], 0>,
- InstrStage<1, [CVI_XLSHF]>], [1, 1, 1, 1]>,
- InstrItinData<CVI_VP_VS_LONG, [InstrStage<1, [SLOT0,SLOT1,SLOT2,SLOT3], 0>,
- InstrStage<1, [CVI_XLSHF]>], [1, 1, 1, 1]>,
- InstrItinData<CVI_VP_VS, [InstrStage<1, [SLOT0,SLOT1,SLOT2,SLOT3], 0>,
- InstrStage<1, [CVI_XLSHF]>], [1, 1, 1, 1]>,
- InstrItinData<CVI_VP_VS_LONG_EARLY,
- [InstrStage<1, [SLOT0,SLOT1,SLOT2,SLOT3], 0>,
- InstrStage<1, [CVI_XLSHF]>], [1, 1, 1, 1]>,
- InstrItinData<CVI_VP_DV, [InstrStage<1, [SLOT0,SLOT1,SLOT2,SLOT3], 0>,
- InstrStage<1, [CVI_XLSHF]>], [1, 1, 1, 1]>,
- InstrItinData<CVI_VS, [InstrStage<1, [SLOT0,SLOT1,SLOT2,SLOT3], 0>,
- InstrStage<1, [CVI_SHIFT]>], [1, 1, 1, 1]>,
- InstrItinData<CVI_VINLANESAT, [InstrStage<1, [SLOT0,SLOT1,SLOT2,SLOT3], 0>,
- InstrStage<1, [CVI_XLANE, CVI_SHIFT,
- CVI_MPY0, CVI_MPY1]>],
- [1, 1, 1, 1]>,
- InstrItinData<CVI_VM_LD, [InstrStage<1, [SLOT0, SLOT1], 0>,
- InstrStage<1, [CVI_LD], 0>,
- InstrStage<1, [CVI_XLANE, CVI_SHIFT,
- CVI_MPY0, CVI_MPY1]>],
- [1, 1, 1, 1]>,
- InstrItinData<CVI_VM_TMP_LD, [InstrStage<1,[SLOT0, SLOT1], 0>,
- InstrStage<1, [CVI_LD]>],[1, 1, 1, 1, 10]>,
- InstrItinData<CVI_VM_CUR_LD, [InstrStage<1,[SLOT0, SLOT1], 0>,
- InstrStage<1, [CVI_LD], 0>,
- InstrStage<1, [CVI_XLANE, CVI_SHIFT,
- CVI_MPY0, CVI_MPY1]>],
- [1, 1, 1, 1]>,
- InstrItinData<CVI_VM_VP_LDU, [InstrStage<1,[SLOT0], 0>,
- InstrStage<1, [SLOT1], 0>,
- InstrStage<1, [CVI_LD], 0>,
- InstrStage<1, [CVI_XLANE]>], [1, 1, 1, 1]>,
- InstrItinData<CVI_VM_ST, [InstrStage<1, [SLOT0], 0>,
- InstrStage<1, [CVI_ST], 0>,
- InstrStage<1, [CVI_XLANE, CVI_SHIFT,
- CVI_MPY0, CVI_MPY1]>],
- [1, 1, 1, 1]>,
- InstrItinData<CVI_VM_NEW_ST, [InstrStage<1,[SLOT0], 0>,
- InstrStage<1, [CVI_ST]>], [1, 1, 1, 1]>,
- InstrItinData<CVI_VM_STU, [InstrStage<1, [SLOT0], 0>,
- InstrStage<1, [SLOT1], 0>,
- InstrStage<1, [CVI_ST], 0>,
- InstrStage<1, [CVI_XLANE]>], [1, 1, 1, 1]>,
- InstrItinData<CVI_HIST, [InstrStage<1, [SLOT0,SLOT1,SLOT2,SLOT3], 0>,
- InstrStage<1, [CVI_ALL]>], [1, 1, 1, 1]>];
+def CVI_VA : InstrItinClass;
+
+class HVXItin {
+ list<InstrItinData> HVXItin_list = [
+ InstrItinData<CVI_VA,
+ [InstrStage<1, [SLOT0,SLOT1,SLOT2,SLOT3], 0>,
+ InstrStage<1, [CVI_XLANE,CVI_SHIFT, CVI_MPY0, CVI_MPY1]>],
+ [9, 7, 7, 7], [HVX_FWD, HVX_FWD, HVX_FWD]>];
}
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonIICScalar.td b/contrib/llvm/lib/Target/Hexagon/HexagonIICScalar.td
index e69cfbdad688..5fe713346e38 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonIICScalar.td
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonIICScalar.td
@@ -11,154 +11,22 @@
// classes as per V62. Curretnly, they are just extracted from
// HexagonScheduleV62.td but will soon be auto-generated by HexagonGen.py.
+class PseudoItin {
+ list<InstrItinData> PseudoItin_list = [
+ InstrItinData<PSEUDO, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>],
+ [1, 1, 1]>,
+ InstrItinData<PSEUDOM, [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [SLOT2, SLOT3]>], [1, 1, 1]>,
+ InstrItinData<DUPLEX, [InstrStage<1, [SLOT0]>], [1, 1, 1]>,
+ InstrItinData<tc_ENDLOOP, [InstrStage<1, [SLOT_ENDLOOP]>], [2]>
+ ];
+}
+
class ScalarItin {
list<InstrItinData> ScalarItin_list = [
- InstrItinData<ALU32_2op_tc_1_SLOT0123 ,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [1, 1, 1]>,
- InstrItinData<ALU32_2op_tc_2early_SLOT0123,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [2, 1, 1]>,
- InstrItinData<ALU32_3op_tc_1_SLOT0123 ,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [1, 1, 1]>,
- InstrItinData<ALU32_3op_tc_2_SLOT0123 ,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [2, 1, 1]>,
- InstrItinData<ALU32_3op_tc_2early_SLOT0123,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [2, 1, 1]>,
- InstrItinData<ALU32_ADDI_tc_1_SLOT0123 ,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [1, 1, 1]>,
-
- // ALU64
- InstrItinData<ALU64_tc_1_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [1, 1, 1]>,
- InstrItinData<ALU64_tc_2_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [2, 1, 1]>,
- InstrItinData<ALU64_tc_2early_SLOT23, [InstrStage<1, [SLOT2, SLOT3]>],
- [2, 1, 1]>,
- InstrItinData<ALU64_tc_3x_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [3, 1, 1]>,
-
- // CR -> System
- InstrItinData<CR_tc_2_SLOT3 , [InstrStage<1, [SLOT3]>], [2, 1, 1]>,
- InstrItinData<CR_tc_2early_SLOT3 , [InstrStage<1, [SLOT3]>], [2, 1, 1]>,
- InstrItinData<CR_tc_3x_SLOT3 , [InstrStage<1, [SLOT3]>], [3, 1, 1]>,
-
- // Jump (conditional/unconditional/return etc)
- InstrItinData<CR_tc_2early_SLOT23, [InstrStage<1, [SLOT2, SLOT3]>],
- [2, 1, 1, 1]>,
- InstrItinData<CR_tc_3x_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [3, 1, 1, 1]>,
- InstrItinData<CJ_tc_1_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [1, 1, 1, 1]>,
- InstrItinData<CJ_tc_2early_SLOT23, [InstrStage<1, [SLOT2, SLOT3]>],
- [2, 1, 1, 1]>,
- InstrItinData<J_tc_2early_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [2, 1, 1, 1]>,
- InstrItinData<J_tc_2early_CJUMP_UCJUMP_ARCHDEPSLOT,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [2, 1, 1, 1]>,
-
- // JR
- InstrItinData<J_tc_2early_SLOT2 , [InstrStage<1, [SLOT2]>], [2, 1, 1]>,
- InstrItinData<J_tc_3stall_SLOT2 , [InstrStage<1, [SLOT2]>], [3, 1, 1]>,
-
- // Extender
- InstrItinData<EXTENDER_tc_1_SLOT0123, [InstrStage<1,
- [SLOT0, SLOT1, SLOT2, SLOT3]>], [2, 1, 1, 1]>,
-
- // Load
- InstrItinData<LD_tc_ld_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>],
- [3, 1]>,
- InstrItinData<LD_tc_ld_pi_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>],
- [3, 1]>,
- InstrItinData<LD_tc_3or4stall_SLOT0, [InstrStage<1, [SLOT0]>], [4, 1]>,
- InstrItinData<LD_tc_ld_SLOT0 , [InstrStage<1, [SLOT0]>], [3, 1]>,
-
- // M
- InstrItinData<M_tc_1_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [1, 1, 1]>,
- InstrItinData<M_tc_2_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [2, 1, 1]>,
- InstrItinData<M_tc_2_acc_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [2, 1, 1]>,
- InstrItinData<M_tc_3_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [3, 1, 1]>,
- InstrItinData<M_tc_3x_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [3, 1, 1]>,
- InstrItinData<M_tc_3x_acc_SLOT23, [InstrStage<1, [SLOT2, SLOT3]>],
- [3, 1, 1, 1]>,
- InstrItinData<M_tc_3or4x_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [4, 1, 1]>,
- InstrItinData<M_tc_3or4x_acc_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [4, 1, 1]>,
- InstrItinData<M_tc_3stall_SLOT23, [InstrStage<1, [SLOT2, SLOT3]>],
- [3, 1, 1]>,
-
- // Store
- InstrItinData<ST_tc_st_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>],
- [1, 1, 1]>,
- InstrItinData<ST_tc_st_pi_SLOT01, [InstrStage<1, [SLOT0, SLOT1]>],
- [1, 1, 1]>,
- InstrItinData<ST_tc_3stall_SLOT0, [InstrStage<1, [SLOT0]>], [3, 1, 1]>,
- InstrItinData<ST_tc_ld_SLOT0 , [InstrStage<1, [SLOT0]>], [3, 1, 1]>,
- InstrItinData<ST_tc_st_SLOT0 , [InstrStage<1, [SLOT0]>], [1, 1, 1]>,
- InstrItinData<ST_tc_st_pi_SLOT0 , [InstrStage<1, [SLOT0]>], [1, 1, 1]>,
-
- // S
- InstrItinData<S_2op_tc_1_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [1, 1, 1]>,
- InstrItinData<S_2op_tc_2_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [2, 1, 1]>,
- InstrItinData<S_2op_tc_2early_SLOT23, [InstrStage<1, [SLOT2, SLOT3]>],
- [2, 1, 1]>,
- // The S_2op_tc_3x_SLOT23 slots are 4 cycles on v60.
- InstrItinData<S_2op_tc_3or4x_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [4, 1, 1]>,
- InstrItinData<S_3op_tc_1_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [1, 1, 1]>,
- InstrItinData<S_3op_tc_2_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [2, 1, 1]>,
- InstrItinData<S_3op_tc_2early_SLOT23, [InstrStage<1, [SLOT2, SLOT3]>],
- [2, 1, 1]>,
- InstrItinData<S_3op_tc_3_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [3, 1, 1]>,
- InstrItinData<S_3op_tc_3stall_SLOT23, [InstrStage<1, [SLOT2, SLOT3]>],
- [3, 1, 1]>,
- InstrItinData<S_3op_tc_3x_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [3, 1, 1]>,
-
- // New Value Compare Jump
- InstrItinData<NCJ_tc_3or4stall_SLOT0, [InstrStage<1, [SLOT0]>],
- [3, 1, 1, 1]>,
-
- // Mem ops
- InstrItinData<V2LDST_tc_st_SLOT0 , [InstrStage<1, [SLOT0]>],
- [1, 1, 1, 1]>,
- InstrItinData<V2LDST_tc_ld_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>],
- [2, 1, 1, 1]>,
- InstrItinData<V2LDST_tc_st_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>],
- [1, 1, 1, 1]>,
- InstrItinData<V4LDST_tc_st_SLOT0 , [InstrStage<1, [SLOT0]>],
- [1, 1, 1, 1]>,
- InstrItinData<V4LDST_tc_ld_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>],
- [3, 1, 1, 1]>,
- InstrItinData<V4LDST_tc_st_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>],
- [1, 1, 1, 1]>,
-
- // Endloop
- InstrItinData<J_tc_2early_SLOT0123, [InstrStage<1, [SLOT_ENDLOOP]>],
- [2]>,
- InstrItinData<MAPPING_tc_1_SLOT0123 ,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>],
- [1, 1, 1, 1]>,
-
- // Duplex and Compound
- InstrItinData<DUPLEX , [InstrStage<1, [SLOT0]>], [1, 1, 1]>,
- InstrItinData<COMPOUND_CJ_ARCHDEPSLOT,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [1, 1, 1]>,
- InstrItinData<COMPOUND , [InstrStage<1, [SLOT2, SLOT3]>], [1, 1, 1]>,
- // Misc
- InstrItinData<PREFIX , [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>],
- [1, 1, 1]>,
- InstrItinData<PSEUDO , [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>],
- [1, 1, 1]>,
- InstrItinData<PSEUDOM , [InstrStage<1, [SLOT2, SLOT3], 0>,
- InstrStage<1, [SLOT2, SLOT3]>], [1, 1, 1]>];
+ InstrItinData<LD_tc_ld_SLOT01, [InstrStage<1, [SLOT0, SLOT1]>],
+ [3, 1], [Hex_FWD, Hex_FWD]>,
+ InstrItinData<ST_tc_st_SLOT01, [InstrStage<1, [SLOT0, SLOT1]>],
+ [1, 1, 1], [Hex_FWD, Hex_FWD, Hex_FWD]>
+ ];
}
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonInstrFormats.td b/contrib/llvm/lib/Target/Hexagon/HexagonInstrFormats.td
index 709d64585c0b..636a439ba6a9 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonInstrFormats.td
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonInstrFormats.td
@@ -188,30 +188,10 @@ class LDInst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
string cstr = "", InstrItinClass itin = LD_tc_ld_SLOT01>
: InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeLD>, OpcodeHexagon;
-class PseudoLDInst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = LD_tc_ld_SLOT01>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeLD>, OpcodeHexagon;
-
class CONSTLDInst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "">
- : PseudoLDInst<outs, ins, asmstr, pattern, cstr>;
-
-// LD Instruction Class in V2/V3/V4.
-// Definition of the instruction class NOT CHANGED.
-class LDInstPost<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "">
- : LDInst<outs, ins, asmstr, pattern, cstr>;
-
-let mayLoad = 1 in
-class LD0Inst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin=LD_tc_ld_SLOT0>
+ string cstr = "", InstrItinClass itin = LD_tc_ld_SLOT01>
: InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeLD>, OpcodeHexagon;
-let mayLoad = 1 in
-class LD1Inst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin=LD_tc_ld_SLOT0>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeLD>;
-
// ST Instruction Class in V2/V3 can take SLOT0 only.
// ST Instruction Class in V4 can take SLOT0 & SLOT1.
// Definition of the instruction class CHANGED from V2/V3 to V4.
@@ -220,124 +200,9 @@ class STInst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
string cstr = "", InstrItinClass itin = ST_tc_st_SLOT01>
: InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeST>, OpcodeHexagon;
-let mayStore = 1 in
-class STInst_NoOpcode<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = ST_tc_st_SLOT01>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeST>;
-
-class STInst2<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "">
- : STInst<outs, ins, asmstr, pattern, cstr>;
-
-let mayStore = 1 in
-class ST0Inst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = ST_tc_ld_SLOT0>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeST>, OpcodeHexagon;
-
-// Same as ST0Inst but doesn't derive from OpcodeHexagon.
-let mayStore = 1 in
-class ST1Inst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = ST_tc_st_SLOT0>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeST>;
-
-// ST Instruction Class in V2/V3 can take SLOT0 only.
-// ST Instruction Class in V4 can take SLOT0 & SLOT1.
-// Definition of the instruction class CHANGED from V2/V3 to V4.
-class STInstPost<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = ST_tc_st_SLOT01>
- : STInst<outs, ins, asmstr, pattern, cstr, itin>;
-
-// ALU64 Instruction Class in V2/V3.
-// XTYPE Instruction Class in V4.
-// Definition of the instruction class NOT CHANGED.
-// Name of the Instruction Class changed from ALU64 to XTYPE from V2/V3 to V4.
-class ALU64Inst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = ALU64_tc_2_SLOT23>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeALU64>,
- OpcodeHexagon;
-
-// ALU64 Instruction Class in V2/V3.
-// XTYPE Instruction Class in V4.
-// Definition of the instruction class NOT CHANGED.
-// Name of the Instruction Class changed from ALU64 to XTYPE from V2/V3 to V4.
-class ALU64Inst_NoOpcode<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = ALU64_tc_2_SLOT23>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeALU64>;
-
-
-class ALU64_acc<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = ALU64_tc_2_SLOT23>
- : ALU64Inst<outs, ins, asmstr, pattern, cstr, itin>;
-
-
-// M Instruction Class in V2/V3.
-// XTYPE Instruction Class in V4.
-// Definition of the instruction class NOT CHANGED.
-// Name of the Instruction Class changed from M to XTYPE from V2/V3 to V4.
-class MInst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = M_tc_3x_SLOT23>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeM>,
- OpcodeHexagon;
-
-// Same as above but doesn't derive from OpcodeHexagon
-class MInst2<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = M_tc_3x_SLOT23>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeM>;
-
-// M Instruction Class in V2/V3.
-// XTYPE Instruction Class in V4.
-// Definition of the instruction class NOT CHANGED.
-// Name of the Instruction Class changed from M to XTYPE from V2/V3 to V4.
-class MInst_acc<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = M_tc_2_SLOT23>
- : MInst<outs, ins, asmstr, pattern, cstr, itin>;
-
-// S Instruction Class in V2/V3.
-// XTYPE Instruction Class in V4.
-// Definition of the instruction class NOT CHANGED.
-// Name of the Instruction Class changed from S to XTYPE from V2/V3 to V4.
-class SInst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = S_2op_tc_1_SLOT23>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeS_2op>,
- OpcodeHexagon;
-
-class SInst_NoOpcode<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = S_2op_tc_1_SLOT23>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeS_2op>;
-
-class SInst2<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = S_2op_tc_1_SLOT23>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeS_2op>;
-
-// S Instruction Class in V2/V3.
-// XTYPE Instruction Class in V4.
-// Definition of the instruction class NOT CHANGED.
-// Name of the Instruction Class changed from S to XTYPE from V2/V3 to V4.
-class SInst_acc<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = S_3op_tc_1_SLOT23>
- : SInst<outs, ins, asmstr, pattern, cstr, itin> {
- let Type = TypeS_3op;
-}
-
-// J Instruction Class in V2/V3/V4.
-// Definition of the instruction class NOT CHANGED.
-class JInst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = J_tc_2early_SLOT23>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeJ>, OpcodeHexagon;
-
-class JInst_CJUMP_UCJUMP<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = J_tc_2early_CJUMP_UCJUMP_ARCHDEPSLOT>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeJ>, OpcodeHexagon;
-
-// CR Instruction Class in V2/V3/V4.
-// Definition of the instruction class NOT CHANGED.
-class CRInst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = CR_tc_2early_SLOT3>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCR>, OpcodeHexagon;
-
let isCodeGenOnly = 1, isPseudo = 1 in
class Endloop<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = J_tc_2early_SLOT0123>
+ string cstr = "", InstrItinClass itin = tc_ENDLOOP>
: InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeENDLOOP>,
OpcodeHexagon;
@@ -357,27 +222,6 @@ class PseudoM<dag outs, dag ins, string asmstr, list<dag> pattern = [],
// Instruction Classes Definitions -
//===----------------------------------------------------------------------===//
-//
-// ALU64 patterns.
-//
-class ALU64_rr<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = ALU64_tc_1_SLOT23>
- : ALU64Inst<outs, ins, asmstr, pattern, cstr, itin>;
-
-class ALU64_ri<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = ALU64_tc_1_SLOT23>
- : ALU64Inst<outs, ins, asmstr, pattern, cstr, itin>;
-
-// Post increment ST Instruction.
-class STInstPI<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "">
- : STInst<outs, ins, asmstr, pattern, cstr>;
-
-// Post increment LD Instruction.
-class LDInstPI<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "">
- : LDInst<outs, ins, asmstr, pattern, cstr>;
-
//===----------------------------------------------------------------------===//
// V4 Instruction Format Definitions +
//===----------------------------------------------------------------------===//
@@ -385,7 +229,7 @@ class LDInstPI<dag outs, dag ins, string asmstr, list<dag> pattern = [],
include "HexagonInstrFormatsV4.td"
//===----------------------------------------------------------------------===//
-// V4 Instruction Format Definitions +
+// V55 Instruction Format Definitions +
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
@@ -395,5 +239,5 @@ include "HexagonInstrFormatsV4.td"
include "HexagonInstrFormatsV60.td"
//===----------------------------------------------------------------------===//
-// V60 Instruction Format Definitions +
+// V62 Instruction Format Definitions +
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonInstrFormatsV4.td b/contrib/llvm/lib/Target/Hexagon/HexagonInstrFormatsV4.td
index 1fdf930c62fd..c5fa25995212 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonInstrFormatsV4.td
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonInstrFormatsV4.td
@@ -1,4 +1,4 @@
-//==- HexagonInstrFormats.td - Hexagon Instruction Formats --*- tablegen -*-==//
+//==- HexagonInstrFormatsV4.td - Hexagon Instruction Formats --*- tablegen -==//
//
// The LLVM Compiler Infrastructure
//
@@ -85,64 +85,3 @@ class InstDuplex<bits<4> iClass, list<dag> pattern = [],
bits<2> opExtentAlign = 0;
let TSFlags{28-27} = opExtentAlign; // Alignment exponent before extending.
}
-
-//----------------------------------------------------------------------------//
-// Instruction Classes Definitions
-//----------------------------------------------------------------------------//
-
-//
-// NV type instructions.
-//
-class NVInst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = NCJ_tc_3or4stall_SLOT0>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeNCJ>, OpcodeHexagon;
-
-class NVInst_V4<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = NCJ_tc_3or4stall_SLOT0>
- : NVInst<outs, ins, asmstr, pattern, cstr, itin>;
-
-// Definition of Post increment new value store.
-class NVInstPost_V4<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = ST_tc_st_SLOT0>
- : NVInst<outs, ins, asmstr, pattern, cstr, itin>;
-
-// Post increment ST Instruction.
-let mayStore = 1 in
-class NVInstPI_V4<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = ST_tc_st_SLOT0>
- : NVInst<outs, ins, asmstr, pattern, cstr, itin>;
-
-// New-value conditional branch.
-class NCJInst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "">
- : NVInst<outs, ins, asmstr, pattern, cstr>;
-
-let mayLoad = 1, mayStore = 1 in
-class MEMInst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = V4LDST_tc_st_SLOT0>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeV4LDST>,
- OpcodeHexagon;
-
-class MEMInst_V4<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = V4LDST_tc_st_SLOT0>
- : MEMInst<outs, ins, asmstr, pattern, cstr, itin>;
-
-class EXTENDERInst<dag outs, dag ins, string asmstr, list<dag> pattern = []>
- : InstHexagon<outs, ins, asmstr, pattern, "", EXTENDER_tc_1_SLOT0123,
- TypeEXTENDER>, OpcodeHexagon;
-
-class SUBInst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "">
- : InstHexagon<outs, ins, asmstr, pattern, "", PREFIX, TypeDUPLEX>,
- OpcodeHexagon;
-
-class CJInst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "">
- : InstHexagon<outs, ins, asmstr, pattern, cstr, COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>,
- OpcodeHexagon;
-
-class CJInst_JMPSET<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "">
- : InstHexagon<outs, ins, asmstr, pattern, cstr, COMPOUND, TypeCJ>,
- OpcodeHexagon;
-
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonInstrFormatsV60.td b/contrib/llvm/lib/Target/Hexagon/HexagonInstrFormatsV60.td
index b913727972e5..14bda0e0107d 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonInstrFormatsV60.td
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonInstrFormatsV60.td
@@ -20,183 +20,3 @@ class CVI_VA_Resource<dag outs, dag ins, string asmstr,
InstrItinClass itin = CVI_VA>
: InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VA>,
OpcodeHexagon, Requires<[HasV60T, UseHVX]>;
-
-class CVI_VA_DV_Resource<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VA_DV>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VA_DV>,
- OpcodeHexagon, Requires<[HasV60T, UseHVX]>;
-
-class CVI_VX_Resource_long<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VX_LONG>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VX>,
- OpcodeHexagon, Requires<[HasV60T, UseHVX]>;
-
-class CVI_VX_Resource_late<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VX_LATE>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VX>,
- Requires<[HasV60T, UseHVX]>;
-
-class CVI_VX_Resource<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VX>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VX>,
- OpcodeHexagon, Requires<[HasV60T, UseHVX]>;
-
-class CVI_VX_DV_Resource<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VX_DV>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VX_DV>,
- OpcodeHexagon, Requires<[HasV60T, UseHVX]>;
-
-class CVI_VX_DV_Slot2_Resource<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VX_DV_SLOT2>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VX_DV>,
- OpcodeHexagon, Requires<[HasV60T, UseHVX]>;
-
-class CVI_VX_DV_Resource_long<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VX_DV_LONG>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VX_DV>,
- OpcodeHexagon, Requires<[HasV60T, UseHVX]>;
-
-class CVI_VP_Resource_long<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VP_LONG>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VP>,
- OpcodeHexagon, Requires<[HasV60T, UseHVX]>;
-
-class CVI_VP_VS_Resource_early<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VP_VS_EARLY>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VP_VS>,
- OpcodeHexagon, Requires<[HasV60T, UseHVX]>;
-
-class CVI_VP_VS_Resource_long<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VP_VS_LONG>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VP_VS>,
- OpcodeHexagon, Requires<[HasV60T, UseHVX]>;
-
-class CVI_VP_VS_Resource_long_early<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VP_VS_LONG_EARLY>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VP_VS>,
- OpcodeHexagon, Requires<[HasV60T, UseHVX]>;
-
-class CVI_VS_Resource<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VS>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VS>,
- OpcodeHexagon, Requires<[HasV60T, UseHVX]>;
-
-class CVI_VINLANESAT_Resource<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VINLANESAT>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VINLANESAT>,
- OpcodeHexagon, Requires<[HasV60T, UseHVX]>;
-
-class CVI_VS_Resource_long<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VS>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VS>,
- OpcodeHexagon, Requires<[HasV60T, UseHVX]>;
-
-class CVI_VM_LD_Resource<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VM_LD>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VM_LD>,
- OpcodeHexagon, Requires<[HasV60T, UseHVX]>;
-
-class CVI_VM_LD_Resource_long<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VM_LD>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VM_LD>,
- OpcodeHexagon, Requires<[HasV60T, UseHVX]>;
-
-class CVI_VM_TMP_LD_Resource<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VM_TMP_LD>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VM_TMP_LD>,
- OpcodeHexagon, Requires<[HasV60T, UseHVX]>;
-
-class CVI_VM_TMP_LD_Resource_long<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VM_TMP_LD>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VM_TMP_LD>,
- OpcodeHexagon, Requires<[HasV60T, UseHVX]>;
-
-class CVI_VM_VP_LDU_Resource<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VM_VP_LDU>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VM_VP_LDU>,
- OpcodeHexagon, Requires<[HasV60T, UseHVX]>;
-
-class CVI_VM_VP_LDU_Resource_long<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VM_VP_LDU>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VM_VP_LDU>,
- OpcodeHexagon, Requires<[HasV60T, UseHVX]>;
-
-class CVI_VM_ST_Resource<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VM_ST>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VM_ST>,
- OpcodeHexagon, Requires<[HasV60T, UseHVX]>;
-
-class CVI_VM_ST_Resource_long<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VM_ST>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VM_ST>,
- OpcodeHexagon, Requires<[HasV60T, UseHVX]>;
-
-class CVI_VM_NEW_ST_Resource<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VM_NEW_ST>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VM_NEW_ST>,
- OpcodeHexagon, Requires<[HasV60T, UseHVX]>;
-
-class CVI_VM_NEW_ST_Resource_long<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VM_NEW_ST>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VM_NEW_ST>,
- OpcodeHexagon, Requires<[HasV60T, UseHVX]>;
-
-class CVI_VM_STU_Resource<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VM_STU>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VM_STU>,
- OpcodeHexagon, Requires<[HasV60T, UseHVX]>;
-
-class CVI_VM_STU_Resource_long<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VM_STU>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VM_STU>,
- OpcodeHexagon, Requires<[HasV60T, UseHVX]>;
-
-class CVI_HIST_Resource<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_HIST>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_HIST>,
- OpcodeHexagon, Requires<[HasV60T, UseHVX]>;
-
-class CVI_VA_Resource1<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VA>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VA>,
- Requires<[HasV60T, UseHVX]>;
-
-class CVI_VX_DV_Resource1<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VX_DV>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VX_DV>,
- Requires<[HasV60T, UseHVX]>;
-
-class CVI_HIST_Resource1<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_HIST>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_HIST>,
- Requires<[HasV60T, UseHVX]>;
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp b/contrib/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
index 852bfb1b4f54..03794511414e 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
@@ -59,6 +59,7 @@ using namespace llvm;
#define GET_INSTRMAP_INFO
#include "HexagonGenInstrInfo.inc"
#include "HexagonGenDFAPacketizer.inc"
+#include "HexagonDepTimingClasses.h"
cl::opt<bool> ScheduleInlineAsm("hexagon-sched-inline-asm", cl::Hidden,
cl::init(false), cl::desc("Do not consider inline-asm a scheduling/"
@@ -1466,7 +1467,15 @@ bool HexagonInstrInfo::DefinesPredicate(
}
bool HexagonInstrInfo::isPredicable(const MachineInstr &MI) const {
- return MI.getDesc().isPredicable();
+ if (!MI.getDesc().isPredicable())
+ return false;
+
+ if (MI.isCall() || isTailCall(MI)) {
+ const MachineFunction &MF = *MI.getParent()->getParent();
+ if (!MF.getSubtarget<HexagonSubtarget>().usePredicatedCalls())
+ return false;
+ }
+ return true;
}
bool HexagonInstrInfo::isSchedulingBoundary(const MachineInstr &MI,
@@ -1643,6 +1652,7 @@ unsigned HexagonInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
return getInstrTimingClassLatency(ItinData, MI);
}
+
DFAPacketizer *HexagonInstrInfo::CreateTargetScheduleState(
const TargetSubtargetInfo &STI) const {
const InstrItineraryData *II = STI.getInstrItineraryData();
@@ -2047,9 +2057,7 @@ bool HexagonInstrInfo::isEarlySourceInstr(const MachineInstr &MI) const {
// Multiply
unsigned SchedClass = MI.getDesc().getSchedClass();
- if (SchedClass == Hexagon::Sched::M_tc_3or4x_SLOT23)
- return true;
- return false;
+ return is_TC4x(SchedClass) || is_TC3x(SchedClass);
}
bool HexagonInstrInfo::isEndLoopN(unsigned Opcode) const {
@@ -2117,7 +2125,7 @@ bool HexagonInstrInfo::isFloat(const MachineInstr &MI) const {
// No V60 HVX VMEM with A_INDIRECT.
bool HexagonInstrInfo::isHVXMemWithAIndirect(const MachineInstr &I,
const MachineInstr &J) const {
- if (!isV60VectorInstruction(I))
+ if (!isHVXVec(I))
return false;
if (!I.mayLoad() && !I.mayStore())
return false;
@@ -2241,30 +2249,13 @@ bool HexagonInstrInfo::isLateResultInstr(const MachineInstr &MI) const {
}
unsigned SchedClass = MI.getDesc().getSchedClass();
-
- switch (SchedClass) {
- case Hexagon::Sched::ALU32_2op_tc_1_SLOT0123:
- case Hexagon::Sched::ALU32_3op_tc_1_SLOT0123:
- case Hexagon::Sched::ALU32_ADDI_tc_1_SLOT0123:
- case Hexagon::Sched::ALU64_tc_1_SLOT23:
- case Hexagon::Sched::EXTENDER_tc_1_SLOT0123:
- case Hexagon::Sched::S_2op_tc_1_SLOT23:
- case Hexagon::Sched::S_3op_tc_1_SLOT23:
- case Hexagon::Sched::V2LDST_tc_ld_SLOT01:
- case Hexagon::Sched::V2LDST_tc_st_SLOT0:
- case Hexagon::Sched::V2LDST_tc_st_SLOT01:
- case Hexagon::Sched::V4LDST_tc_ld_SLOT01:
- case Hexagon::Sched::V4LDST_tc_st_SLOT0:
- case Hexagon::Sched::V4LDST_tc_st_SLOT01:
- return false;
- }
- return true;
+ return !is_TC1(SchedClass);
}
bool HexagonInstrInfo::isLateSourceInstr(const MachineInstr &MI) const {
// Instructions with iclass A_CVI_VX and attribute A_CVI_LATE uses a multiply
// resource, but all operands can be received late like an ALU instruction.
- return MI.getDesc().getSchedClass() == Hexagon::Sched::CVI_VX_LATE;
+ return getType(MI) == HexagonII::TypeCVI_VX_LATE;
}
bool HexagonInstrInfo::isLoopN(const MachineInstr &MI) const {
@@ -2507,61 +2498,22 @@ bool HexagonInstrInfo::isTailCall(const MachineInstr &MI) const {
// Returns true when SU has a timing class TC1.
bool HexagonInstrInfo::isTC1(const MachineInstr &MI) const {
unsigned SchedClass = MI.getDesc().getSchedClass();
- switch (SchedClass) {
- case Hexagon::Sched::ALU32_2op_tc_1_SLOT0123:
- case Hexagon::Sched::ALU32_3op_tc_1_SLOT0123:
- case Hexagon::Sched::ALU32_ADDI_tc_1_SLOT0123:
- case Hexagon::Sched::ALU64_tc_1_SLOT23:
- case Hexagon::Sched::EXTENDER_tc_1_SLOT0123:
- //case Hexagon::Sched::M_tc_1_SLOT23:
- case Hexagon::Sched::S_2op_tc_1_SLOT23:
- case Hexagon::Sched::S_3op_tc_1_SLOT23:
- return true;
-
- default:
- return false;
- }
+ return is_TC1(SchedClass);
}
bool HexagonInstrInfo::isTC2(const MachineInstr &MI) const {
unsigned SchedClass = MI.getDesc().getSchedClass();
- switch (SchedClass) {
- case Hexagon::Sched::ALU32_3op_tc_2_SLOT0123:
- case Hexagon::Sched::ALU64_tc_2_SLOT23:
- case Hexagon::Sched::CR_tc_2_SLOT3:
- case Hexagon::Sched::M_tc_2_SLOT23:
- case Hexagon::Sched::S_2op_tc_2_SLOT23:
- case Hexagon::Sched::S_3op_tc_2_SLOT23:
- return true;
-
- default:
- return false;
- }
+ return is_TC2(SchedClass);
}
bool HexagonInstrInfo::isTC2Early(const MachineInstr &MI) const {
unsigned SchedClass = MI.getDesc().getSchedClass();
- switch (SchedClass) {
- case Hexagon::Sched::ALU32_2op_tc_2early_SLOT0123:
- case Hexagon::Sched::ALU32_3op_tc_2early_SLOT0123:
- case Hexagon::Sched::ALU64_tc_2early_SLOT23:
- case Hexagon::Sched::CR_tc_2early_SLOT23:
- case Hexagon::Sched::CR_tc_2early_SLOT3:
- case Hexagon::Sched::J_tc_2early_SLOT0123:
- case Hexagon::Sched::J_tc_2early_SLOT2:
- case Hexagon::Sched::J_tc_2early_SLOT23:
- case Hexagon::Sched::S_2op_tc_2early_SLOT23:
- case Hexagon::Sched::S_3op_tc_2early_SLOT23:
- return true;
-
- default:
- return false;
- }
+ return is_TC2early(SchedClass);
}
bool HexagonInstrInfo::isTC4x(const MachineInstr &MI) const {
unsigned SchedClass = MI.getDesc().getSchedClass();
- return SchedClass == Hexagon::Sched::M_tc_3or4x_SLOT23;
+ return is_TC4x(SchedClass);
}
// Schedule this ASAP.
@@ -2583,7 +2535,7 @@ bool HexagonInstrInfo::isToBeScheduledASAP(const MachineInstr &MI1,
return false;
}
-bool HexagonInstrInfo::isV60VectorInstruction(const MachineInstr &MI) const {
+bool HexagonInstrInfo::isHVXVec(const MachineInstr &MI) const {
const uint64_t V = getType(MI);
return HexagonII::TypeCVI_FIRST <= V && V <= HexagonII::TypeCVI_LAST;
}
@@ -2782,7 +2734,7 @@ bool HexagonInstrInfo::isValidOffset(unsigned Opcode, int Offset,
}
bool HexagonInstrInfo::isVecAcc(const MachineInstr &MI) const {
- return isV60VectorInstruction(MI) && isAccumulator(MI);
+ return isHVXVec(MI) && isAccumulator(MI);
}
bool HexagonInstrInfo::isVecALU(const MachineInstr &MI) const {
@@ -2888,7 +2840,7 @@ bool HexagonInstrInfo::isZeroExtendingLoad(const MachineInstr &MI) const {
// Add latency to instruction.
bool HexagonInstrInfo::addLatencyToSchedule(const MachineInstr &MI1,
const MachineInstr &MI2) const {
- if (isV60VectorInstruction(MI1) && isV60VectorInstruction(MI2))
+ if (isHVXVec(MI1) && isHVXVec(MI2))
if (!isVecUsableNextPacket(MI1, MI2))
return true;
return false;
@@ -3013,7 +2965,7 @@ bool HexagonInstrInfo::mayBeNewStore(const MachineInstr &MI) const {
bool HexagonInstrInfo::producesStall(const MachineInstr &ProdMI,
const MachineInstr &ConsMI) const {
// There is no stall when ProdMI is not a V60 vector.
- if (!isV60VectorInstruction(ProdMI))
+ if (!isHVXVec(ProdMI))
return false;
// There is no stall when ProdMI and ConsMI are not dependent.
@@ -3031,7 +2983,7 @@ bool HexagonInstrInfo::producesStall(const MachineInstr &ProdMI,
bool HexagonInstrInfo::producesStall(const MachineInstr &MI,
MachineBasicBlock::const_instr_iterator BII) const {
// There is no stall when I is not a V60 vector.
- if (!isV60VectorInstruction(MI))
+ if (!isHVXVec(MI))
return false;
MachineBasicBlock::const_instr_iterator MII = BII;
@@ -3415,7 +3367,6 @@ int HexagonInstrInfo::getNonDotCurOp(const MachineInstr &MI) const {
// p.old store
// [if (p0)memw(R0+#0)=R2]
//
-//
// The following set of instructions further explains the scenario where
// conditional new-value store becomes invalid when promoted to .new predicate
// form.
@@ -4025,18 +3976,53 @@ unsigned HexagonInstrInfo::getInstrTimingClassLatency(
if (!ItinData)
return getInstrLatency(ItinData, MI);
- // Get the latency embedded in the itinerary. If we're not using timing class
- // latencies or if we using BSB scheduling, then restrict the maximum latency
- // to 1 (that is, either 0 or 1).
if (MI.isTransient())
return 0;
- unsigned Latency = ItinData->getStageLatency(MI.getDesc().getSchedClass());
- if (!EnableTimingClassLatency ||
- MI.getParent()->getParent()->getSubtarget<HexagonSubtarget>().
- useBSBScheduling())
- if (Latency > 1)
- Latency = 1;
- return Latency;
+ return ItinData->getStageLatency(MI.getDesc().getSchedClass());
+}
+
+/// getOperandLatency - Compute and return the use operand latency of a given
+/// pair of def and use.
+/// In most cases, the static scheduling itinerary was enough to determine the
+/// operand latency. But it may not be possible for instructions with variable
+/// number of defs / uses.
+///
+/// This is a raw interface to the itinerary that may be directly overriden by
+/// a target. Use computeOperandLatency to get the best estimate of latency.
+int HexagonInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
+ const MachineInstr &DefMI,
+ unsigned DefIdx,
+ const MachineInstr &UseMI,
+ unsigned UseIdx) const {
+ auto &RI = getRegisterInfo();
+ // Get DefIdx and UseIdx for super registers.
+ MachineOperand DefMO = DefMI.getOperand(DefIdx);
+
+ if (RI.isPhysicalRegister(DefMO.getReg())) {
+ if (DefMO.isImplicit()) {
+ for (MCSuperRegIterator SR(DefMO.getReg(), &RI); SR.isValid(); ++SR) {
+ int Idx = DefMI.findRegisterDefOperandIdx(*SR, false, false, &RI);
+ if (Idx != -1) {
+ DefIdx = Idx;
+ break;
+ }
+ }
+ }
+
+ MachineOperand UseMO = UseMI.getOperand(UseIdx);
+ if (UseMO.isImplicit()) {
+ for (MCSuperRegIterator SR(UseMO.getReg(), &RI); SR.isValid(); ++SR) {
+ int Idx = UseMI.findRegisterUseOperandIdx(*SR, false, &RI);
+ if (Idx != -1) {
+ UseIdx = Idx;
+ break;
+ }
+ }
+ }
+ }
+
+ return TargetInstrInfo::getOperandLatency(ItinData, DefMI, DefIdx,
+ UseMI, UseIdx);
}
// inverts the predication logic.
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonInstrInfo.h b/contrib/llvm/lib/Target/Hexagon/HexagonInstrInfo.h
index 21b4f738f6e8..97b9bc954688 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonInstrInfo.h
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonInstrInfo.h
@@ -288,6 +288,19 @@ public:
/// If the instruction is an increment of a constant value, return the amount.
bool getIncrementValue(const MachineInstr &MI, int &Value) const override;
+ /// getOperandLatency - Compute and return the use operand latency of a given
+ /// pair of def and use.
+ /// In most cases, the static scheduling itinerary was enough to determine the
+ /// operand latency. But it may not be possible for instructions with variable
+ /// number of defs / uses.
+ ///
+ /// This is a raw interface to the itinerary that may be directly overriden by
+ /// a target. Use computeOperandLatency to get the best estimate of latency.
+ int getOperandLatency(const InstrItineraryData *ItinData,
+ const MachineInstr &DefMI, unsigned DefIdx,
+ const MachineInstr &UseMI,
+ unsigned UseIdx) const override;
+
bool isTailCall(const MachineInstr &MI) const override;
/// HexagonInstrInfo specifics.
@@ -356,7 +369,7 @@ public:
bool isTC4x(const MachineInstr &MI) const;
bool isToBeScheduledASAP(const MachineInstr &MI1,
const MachineInstr &MI2) const;
- bool isV60VectorInstruction(const MachineInstr &MI) const;
+ bool isHVXVec(const MachineInstr &MI) const;
bool isValidAutoIncImm(const EVT VT, const int Offset) const;
bool isValidOffset(unsigned Opcode, int Offset, bool Extend = true) const;
bool isVecAcc(const MachineInstr &MI) const;
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonMachineScheduler.cpp b/contrib/llvm/lib/Target/Hexagon/HexagonMachineScheduler.cpp
index 20dc9b0da1db..324108284a9a 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonMachineScheduler.cpp
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonMachineScheduler.cpp
@@ -744,7 +744,7 @@ int ConvergingVLIWScheduler::SchedulingCost(ReadyQueue &Q, SUnit *SU,
// Give less preference to an instruction that will cause a stall with
// an instruction in the previous packet.
- if (QII.isV60VectorInstruction(Instr)) {
+ if (QII.isHVXVec(Instr)) {
// Check for stalls in the previous packet.
if (Q.getID() == TopQID) {
for (auto J : Top.ResourceModel->OldPacket)
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonPatterns.td b/contrib/llvm/lib/Target/Hexagon/HexagonPatterns.td
index b8c3bf0745ce..32503d111c24 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonPatterns.td
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonPatterns.td
@@ -1,13 +1,5 @@
// Pattern fragment that combines the value type and the register class
// into a single parameter.
-// The pat frags in the definitions below need to have a named register,
-// otherwise i32 will be assumed regardless of the register class. The
-// name of the register does not matter.
-def I1 : PatLeaf<(i1 PredRegs:$R)>;
-def I32 : PatLeaf<(i32 IntRegs:$R)>;
-def I64 : PatLeaf<(i64 DoubleRegs:$R)>;
-def F32 : PatLeaf<(f32 IntRegs:$R)>;
-def F64 : PatLeaf<(f64 DoubleRegs:$R)>;
// Pattern fragments to extract the low and high subregisters from a
// 64-bit value.
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonPseudo.td b/contrib/llvm/lib/Target/Hexagon/HexagonPseudo.td
index 2e8def572c4b..8c2caea2d5c5 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonPseudo.td
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonPseudo.td
@@ -7,6 +7,15 @@
//
//===----------------------------------------------------------------------===//
+// The pat frags in the definitions below need to have a named register,
+// otherwise i32 will be assumed regardless of the register class. The
+// name of the register does not matter.
+def I1 : PatLeaf<(i1 PredRegs:$R)>;
+def I32 : PatLeaf<(i32 IntRegs:$R)>;
+def I64 : PatLeaf<(i64 DoubleRegs:$R)>;
+def F32 : PatLeaf<(f32 IntRegs:$R)>;
+def F64 : PatLeaf<(f64 DoubleRegs:$R)>;
+
let PrintMethod = "printGlobalOperand" in {
def globaladdress : Operand<i32>;
def globaladdressExt : Operand<i32>;
@@ -23,17 +32,20 @@ def DUPLEX_Pseudo : InstHexagon<(outs),
let isExtendable = 1, opExtendable = 1, opExtentBits = 6,
isAsmParserOnly = 1 in
-def TFRI64_V2_ext : ALU64_rr<(outs DoubleRegs:$dst),
- (ins s32_0Imm:$src1, s8_0Imm:$src2),
- "$dst=combine(#$src1,#$src2)">;
+def TFRI64_V2_ext : InstHexagon<(outs DoubleRegs:$dst),
+ (ins s32_0Imm:$src1, s8_0Imm:$src2),
+ "$dst=combine(#$src1,#$src2)", [], "",
+ A2_combineii.Itinerary, TypeALU32_2op>, OpcodeHexagon;
// HI/LO Instructions
let isReMaterializable = 1, isMoveImm = 1, hasSideEffects = 0,
hasNewValue = 1, opNewValue = 0 in
-class REG_IMMED<string RegHalf, bit Rs, bits<3> MajOp, bit MinOp>
+class REG_IMMED<string RegHalf, bit Rs, bits<3> MajOp, bit MinOp,
+ InstHexagon rootInst>
: InstHexagon<(outs IntRegs:$dst),
- (ins u16_0Imm:$imm_value),
- "$dst"#RegHalf#"=#$imm_value", [], "", ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, OpcodeHexagon {
+ (ins u16_0Imm:$imm_value),
+ "$dst"#RegHalf#"=#$imm_value", [], "",
+ rootInst.Itinerary, rootInst.Type>, OpcodeHexagon {
bits<5> dst;
bits<32> imm_value;
@@ -46,8 +58,8 @@ class REG_IMMED<string RegHalf, bit Rs, bits<3> MajOp, bit MinOp>
}
let isAsmParserOnly = 1 in {
- def LO : REG_IMMED<".l", 0b0, 0b001, 0b1>;
- def HI : REG_IMMED<".h", 0b0, 0b010, 0b1>;
+ def LO : REG_IMMED<".l", 0b0, 0b001, 0b1, A2_tfril>;
+ def HI : REG_IMMED<".h", 0b0, 0b010, 0b1, A2_tfrih>;
}
let isReMaterializable = 1, isMoveImm = 1, isAsmParserOnly = 1 in {
@@ -59,11 +71,13 @@ let isReMaterializable = 1, isMoveImm = 1, isAsmParserOnly = 1 in {
let hasSideEffects = 0, isReMaterializable = 1, isPseudo = 1,
isCodeGenOnly = 1 in
-def PS_true : SInst<(outs PredRegs:$dst), (ins), "", []>;
+def PS_true : InstHexagon<(outs PredRegs:$dst), (ins), "",
+ [(set I1:$dst, 1)], "", C2_orn.Itinerary, TypeCR>;
let hasSideEffects = 0, isReMaterializable = 1, isPseudo = 1,
isCodeGenOnly = 1 in
-def PS_false : SInst<(outs PredRegs:$dst), (ins), "", []>;
+def PS_false : InstHexagon<(outs PredRegs:$dst), (ins), "",
+ [(set I1:$dst, 0)], "", C2_andn.Itinerary, TypeCR>;
let Defs = [R29, R30], Uses = [R31, R30, R29], isPseudo = 1 in
def ADJCALLSTACKDOWN : Pseudo<(outs), (ins i32imm:$amt),
@@ -90,10 +104,10 @@ def ENDLOOP1 : Endloop<(outs), (ins b30_2Imm:$offset),
let isExtendable = 1, isExtentSigned = 1, opExtentBits = 9, opExtentAlign = 2,
opExtendable = 0, hasSideEffects = 0 in
-class LOOP_iBase<string mnemonic, Operand brOp, bit mustExtend = 0>
- : CRInst<(outs), (ins brOp:$offset, u10_0Imm:$src2),
+class LOOP_iBase<string mnemonic, InstHexagon rootInst>
+ : InstHexagon <(outs), (ins b30_2Imm:$offset, u10_0Imm:$src2),
#mnemonic#"($offset,#$src2)",
- [], "" , CR_tc_3x_SLOT3> {
+ [], "", rootInst.Itinerary, rootInst.Type>, OpcodeHexagon {
bits<9> offset;
bits<10> src2;
@@ -110,10 +124,10 @@ class LOOP_iBase<string mnemonic, Operand brOp, bit mustExtend = 0>
let isExtendable = 1, isExtentSigned = 1, opExtentBits = 9, opExtentAlign = 2,
opExtendable = 0, hasSideEffects = 0 in
-class LOOP_rBase<string mnemonic, Operand brOp, bit mustExtend = 0>
- : CRInst<(outs), (ins brOp:$offset, IntRegs:$src2),
+class LOOP_rBase<string mnemonic, InstHexagon rootInst>
+ : InstHexagon<(outs), (ins b30_2Imm:$offset, IntRegs:$src2),
#mnemonic#"($offset,$src2)",
- [], "" ,CR_tc_3x_SLOT3> {
+ [], "", rootInst.Itinerary, rootInst.Type>, OpcodeHexagon {
bits<9> offset;
bits<5> src2;
@@ -126,27 +140,25 @@ class LOOP_rBase<string mnemonic, Operand brOp, bit mustExtend = 0>
let Inst{4-3} = offset{3-2};
}
-multiclass LOOP_ri<string mnemonic> {
- let isCodeGenOnly = 1, isExtended = 1, opExtendable = 0 in {
- def iext: LOOP_iBase<mnemonic, b30_2Imm, 1>;
- def rext: LOOP_rBase<mnemonic, b30_2Imm, 1>;
- }
+let Defs = [SA0, LC0, USR], isCodeGenOnly = 1, isExtended = 1,
+ opExtendable = 0 in {
+ def J2_loop0iext : LOOP_iBase<"loop0", J2_loop0i>;
+ def J2_loop1iext : LOOP_iBase<"loop1", J2_loop1i>;
}
-
-let Defs = [SA0, LC0, USR] in
-defm J2_loop0 : LOOP_ri<"loop0">;
-
// Interestingly only loop0's appear to set usr.lpcfg
-let Defs = [SA1, LC1] in
-defm J2_loop1 : LOOP_ri<"loop1">;
+let Defs = [SA1, LC1], isCodeGenOnly = 1, isExtended = 1, opExtendable = 0 in {
+ def J2_loop0rext : LOOP_rBase<"loop0", J2_loop0r>;
+ def J2_loop1rext : LOOP_rBase<"loop1", J2_loop1r>;
+}
let isCall = 1, hasSideEffects = 1, isPredicable = 0,
isExtended = 0, isExtendable = 1, opExtendable = 0,
isExtentSigned = 1, opExtentBits = 24, opExtentAlign = 2 in
class T_Call<string ExtStr>
- : JInst<(outs), (ins a30_2Imm:$dst),
- "call " # ExtStr # "$dst", [], "", J_tc_2early_SLOT23> {
+ : InstHexagon<(outs), (ins a30_2Imm:$dst),
+ "call " # ExtStr # "$dst", [], "", J2_call.Itinerary, TypeJ>,
+ OpcodeHexagon {
let BaseOpcode = "call";
bits<24> dst;
@@ -164,38 +176,24 @@ let isCodeGenOnly = 1, isCall = 1, hasSideEffects = 1,
Defs = [PC, R31, R6, R7, P0] in
def PS_call_stk : T_Call<"">;
-let isCall = 1, hasSideEffects = 1, cofMax1 = 1 in
-class JUMPR_MISC_CALLR<bit isPred, bit isPredNot,
- dag InputDag = (ins IntRegs:$Rs)>
- : JInst<(outs), InputDag,
- !if(isPred, !if(isPredNot, "if (!$Pu) callr $Rs",
- "if ($Pu) callr $Rs"),
- "callr $Rs"),
- [], "", J_tc_2early_SLOT2> {
+// Call, no return.
+let isCall = 1, hasSideEffects = 1, cofMax1 = 1, isCodeGenOnly = 1 in
+def PS_callr_nr: InstHexagon<(outs), (ins IntRegs:$Rs),
+ "callr $Rs", [], "", J2_callr.Itinerary, TypeJ>, OpcodeHexagon {
bits<5> Rs;
bits<2> Pu;
- let isPredicated = isPred;
- let isPredicatedFalse = isPredNot;
+ let isPredicatedFalse = 1;
let IClass = 0b0101;
- let Inst{27-25} = 0b000;
- let Inst{24-23} = !if (isPred, 0b10, 0b01);
- let Inst{22} = 0;
- let Inst{21} = isPredNot;
- let Inst{9-8} = !if (isPred, Pu, 0b00);
+ let Inst{27-21} = 0b0000101;
let Inst{20-16} = Rs;
-
}
-let isCodeGenOnly = 1 in {
- def PS_callr_nr : JUMPR_MISC_CALLR<0, 1>; // Call, no return.
-}
-
let isCall = 1, hasSideEffects = 1,
isExtended = 0, isExtendable = 1, opExtendable = 0, isCodeGenOnly = 1,
- BaseOpcode = "PS_call_nr", isExtentSigned = 1, opExtentAlign = 2,
- Itinerary = J_tc_2early_SLOT23 in
-class Call_nr<bits<5> nbits, bit isPred, bit isFalse, dag iops>
+ BaseOpcode = "PS_call_nr", isExtentSigned = 1, opExtentAlign = 2 in
+class Call_nr<bits<5> nbits, bit isPred, bit isFalse, dag iops,
+ InstrItinClass itin>
: Pseudo<(outs), iops, "">, PredRel {
bits<2> Pu;
bits<17> dst;
@@ -205,16 +203,18 @@ class Call_nr<bits<5> nbits, bit isPred, bit isFalse, dag iops>
let isPredicatedFalse = isFalse;
}
-def PS_call_nr : Call_nr<24, 0, 0, (ins s32_0Imm:$Ii)>;
-//def PS_call_nrt: Call_nr<17, 1, 0, (ins PredRegs:$Pu, s32_0Imm:$dst)>;
-//def PS_call_nrf: Call_nr<17, 1, 1, (ins PredRegs:$Pu, s32_0Imm:$dst)>;
+def PS_call_nr : Call_nr<24, 0, 0, (ins s32_0Imm:$Ii), J2_call.Itinerary>;
+//def PS_call_nrt: Call_nr<17, 1, 0, (ins PredRegs:$Pu, s32_0Imm:$dst),
+// J2_callt.Itinerary>;
+//def PS_call_nrf: Call_nr<17, 1, 1, (ins PredRegs:$Pu, s32_0Imm:$dst),
+// J2_callf.Itinerary>;
let isBranch = 1, isIndirectBranch = 1, isBarrier = 1, Defs = [PC],
isPredicable = 1, hasSideEffects = 0, InputType = "reg",
cofMax1 = 1 in
-class T_JMPr
+class T_JMPr <InstHexagon rootInst>
: InstHexagon<(outs), (ins IntRegs:$dst), "jumpr $dst", [],
- "", J_tc_2early_SLOT2, TypeJ>, OpcodeHexagon {
+ "", rootInst.Itinerary, rootInst.Type>, OpcodeHexagon {
bits<5> dst;
let IClass = 0b0101;
@@ -225,12 +225,12 @@ class T_JMPr
// A return through builtin_eh_return.
let isReturn = 1, isTerminator = 1, isBarrier = 1, hasSideEffects = 0,
isCodeGenOnly = 1, Defs = [PC], Uses = [R28], isPredicable = 0 in
-def EH_RETURN_JMPR : T_JMPr;
+def EH_RETURN_JMPR : T_JMPr<J2_jumpr>;
// Indirect tail-call.
let isPseudo = 1, isCall = 1, isReturn = 1, isBarrier = 1, isPredicable = 0,
isTerminator = 1, isCodeGenOnly = 1 in
-def PS_tailcall_r : T_JMPr;
+def PS_tailcall_r : T_JMPr<J2_jumpr>;
//
// Direct tail-calls.
@@ -262,11 +262,11 @@ class JumpOpcStr<string Mnemonic, bit New, bit Taken> {
}
let isBranch = 1, isIndirectBranch = 1, Defs = [PC], isPredicated = 1,
hasSideEffects = 0, InputType = "reg", cofMax1 = 1 in
-class T_JMPr_c <bit PredNot, bit isPredNew, bit isTak>
+class T_JMPr_c <bit PredNot, bit isPredNew, bit isTak, InstHexagon rootInst>
: InstHexagon<(outs), (ins PredRegs:$src, IntRegs:$dst),
CondStr<"$src", !if(PredNot,0,1), isPredNew>.S #
JumpOpcStr<"jumpr", isPredNew, isTak>.S # " $dst",
- [], "", J_tc_2early_SLOT2, TypeJ>, OpcodeHexagon {
+ [], "", rootInst.Itinerary, rootInst.Type>, OpcodeHexagon {
let isTaken = isTak;
let isPredicatedFalse = PredNot;
@@ -283,30 +283,25 @@ class T_JMPr_c <bit PredNot, bit isPredNew, bit isTak>
let Inst{11} = isPredNew;
let Inst{9-8} = src;
}
-multiclass JMPR_Pred<bit PredNot> {
- def NAME : T_JMPr_c<PredNot, 0, 0>; // not taken
- // Predicate new
- def NAME#newpt : T_JMPr_c<PredNot, 1, 1>; // taken
- def NAME#new : T_JMPr_c<PredNot, 1, 0>; // not taken
-}
-multiclass JMPR_base<string BaseOp> {
- let BaseOpcode = BaseOp in {
- def NAME : T_JMPr;
- defm t : JMPR_Pred<0>;
- defm f : JMPR_Pred<1>;
- }
+
+let isTerminator = 1, hasSideEffects = 0, isReturn = 1, isCodeGenOnly = 1,
+ isBarrier = 1, BaseOpcode = "JMPret" in {
+ def PS_jmpret : T_JMPr<J2_jumpr>, PredNewRel;
+ def PS_jmprett : T_JMPr_c<0, 0, 0, J2_jumprt>, PredNewRel;
+ def PS_jmpretf : T_JMPr_c<1, 0, 0, J2_jumprf>, PredNewRel;
+ def PS_jmprettnew : T_JMPr_c<0, 1, 0, J2_jumprtnew>, PredNewRel;
+ def PS_jmpretfnew : T_JMPr_c<1, 1, 0, J2_jumprfnew>, PredNewRel;
+ def PS_jmprettnewpt : T_JMPr_c<0, 1, 1, J2_jumprtnewpt>, PredNewRel;
+ def PS_jmpretfnewpt : T_JMPr_c<1, 1, 1, J2_jumprfnewpt>, PredNewRel;
}
-let isTerminator = 1, hasSideEffects = 0, isReturn = 1, isCodeGenOnly = 1, isBarrier = 1 in
-defm PS_jmpret : JMPR_base<"JMPret">, PredNewRel;
//defm V6_vtran2x2_map : HexagonMapping<(outs VectorRegs:$Vy32, VectorRegs:$Vx32), (ins VectorRegs:$Vx32in, IntRegs:$Rt32), "vtrans2x2(${Vy32},${Vx32},${Rt32})", (V6_vshuff VectorRegs:$Vy32, VectorRegs:$Vx32, VectorRegs:$Vx32in, IntRegs:$Rt32)>;
// The reason for the custom inserter is to record all ALLOCA instructions
// in MachineFunctionInfo.
-let Defs = [R29], isCodeGenOnly = 1, isPseudo = 1, hasSideEffects = 1 in
-def PS_alloca: InstHexagon<(outs IntRegs:$Rd),
- (ins IntRegs:$Rs, u32_0Imm:$A), "",
- [], "", ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>;
+let Defs = [R29], hasSideEffects = 1 in
+def PS_alloca: Pseudo <(outs IntRegs:$Rd),
+ (ins IntRegs:$Rs, u32_0Imm:$A), "", []>;
// Load predicate.
let isExtendable = 1, opExtendable = 2, isExtentSigned = 1, opExtentBits = 13,
@@ -322,35 +317,19 @@ def LDriw_mod : LDInst<(outs ModRegs:$dst),
(ins IntRegs:$addr, s32_0Imm:$off),
".error \"should not emit\"", []>;
-// Vector load
-let Predicates = [HasV60T, UseHVX] in
-let mayLoad = 1, hasSideEffects = 0 in
- class V6_LDInst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = CVI_VM_LD,
- IType type = TypeCVI_VM_LD>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, type>;
-
-// Vector store
-let Predicates = [HasV60T, UseHVX] in
-let mayStore = 1, hasSideEffects = 0 in
-class V6_STInst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = CVI_VM_ST,
- IType type = TypeCVI_VM_ST>
-: InstHexagon<outs, ins, asmstr, pattern, cstr, itin, type>;
let isCodeGenOnly = 1, isPseudo = 1 in
-def PS_pselect : ALU64_rr<(outs DoubleRegs:$Rd),
+def PS_pselect: InstHexagon<(outs DoubleRegs:$Rd),
(ins PredRegs:$Pu, DoubleRegs:$Rs, DoubleRegs:$Rt),
- ".error \"should not emit\" ", []>;
+ ".error \"should not emit\" ", [], "", A2_tfrpt.Itinerary, TypeALU32_2op>;
let isBranch = 1, isBarrier = 1, Defs = [PC], hasSideEffects = 0,
isPredicable = 1,
isExtendable = 1, opExtendable = 0, isExtentSigned = 1,
opExtentBits = 24, opExtentAlign = 2, InputType = "imm" in
-class T_JMP<string ExtStr>
- : JInst_CJUMP_UCJUMP<(outs), (ins b30_2Imm:$dst),
- "jump " # ExtStr # "$dst",
- [], "", J_tc_2early_CJUMP_UCJUMP_ARCHDEPSLOT> {
+class T_JMP: InstHexagon<(outs), (ins b30_2Imm:$dst),
+ "jump $dst",
+ [], "", J2_jump.Itinerary, TypeJ>, OpcodeHexagon {
bits<24> dst;
let IClass = 0b0101;
@@ -362,16 +341,16 @@ class T_JMP<string ExtStr>
// Restore registers and dealloc return function call.
let isCall = 1, isBarrier = 1, isReturn = 1, isTerminator = 1,
Defs = [R29, R30, R31, PC], isPredicable = 0, isAsmParserOnly = 1 in {
- def RESTORE_DEALLOC_RET_JMP_V4 : T_JMP<"">;
+ def RESTORE_DEALLOC_RET_JMP_V4 : T_JMP;
let isExtended = 1, opExtendable = 0 in
- def RESTORE_DEALLOC_RET_JMP_V4_EXT : T_JMP<"">;
+ def RESTORE_DEALLOC_RET_JMP_V4_EXT : T_JMP;
let Defs = [R14, R15, R28, R29, R30, R31, PC] in {
- def RESTORE_DEALLOC_RET_JMP_V4_PIC : T_JMP<"">;
+ def RESTORE_DEALLOC_RET_JMP_V4_PIC : T_JMP;
let isExtended = 1, opExtendable = 0 in
- def RESTORE_DEALLOC_RET_JMP_V4_EXT_PIC : T_JMP<"">;
+ def RESTORE_DEALLOC_RET_JMP_V4_EXT_PIC : T_JMP;
}
}
@@ -416,33 +395,38 @@ let isCall = 1, Uses = [R29, R31], isAsmParserOnly = 1 in {
def SAVE_REGISTERS_CALL_V4STK_EXT_PIC : T_Call<"">, PredRel;
}
-// Vector load/store pseudos
+// Vector store pseudos
+let Predicates = [HasV60T, UseHVX], isPseudo = 1, isCodeGenOnly = 1,
+ mayStore = 1, hasSideEffects = 0 in
+class STrivv_template<RegisterClass RC, InstHexagon rootInst>
+ : InstHexagon<(outs), (ins IntRegs:$addr, s32_0Imm:$off, RC:$src),
+ "", [], "", rootInst.Itinerary, rootInst.Type>;
-let isPseudo = 1, isCodeGenOnly = 1 in
-class STrivv_template<RegisterClass RC>
- : V6_STInst<(outs), (ins IntRegs:$addr, s32_0Imm:$off, RC:$src), "", []>;
-
-def PS_vstorerw_ai: STrivv_template<VecDblRegs>,
- Requires<[HasV60T,UseHVXSgl]>;
-def PS_vstorerwu_ai: STrivv_template<VecDblRegs>,
+def PS_vstorerw_ai: STrivv_template<VecDblRegs, V6_vS32b_ai>,
Requires<[HasV60T,UseHVXSgl]>;
-def PS_vstorerw_ai_128B: STrivv_template<VecDblRegs128B>,
- Requires<[HasV60T,UseHVXDbl]>;
-def PS_vstorerwu_ai_128B: STrivv_template<VecDblRegs128B>,
+def PS_vstorerw_ai_128B: STrivv_template<VecDblRegs128B, V6_vS32b_ai_128B>,
Requires<[HasV60T,UseHVXDbl]>;
+def PS_vstorerwu_ai: STrivv_template<VecDblRegs, V6_vS32Ub_ai>,
+ Requires<[HasV60T,UseHVXSgl]>;
+def PS_vstorerwu_ai_128B: STrivv_template<VecDblRegs128B, V6_vS32Ub_ai_128B>,
+ Requires<[HasV60T,UseHVXDbl]>;
-let isPseudo = 1, isCodeGenOnly = 1 in
-class LDrivv_template<RegisterClass RC>
- : V6_LDInst<(outs RC:$dst), (ins IntRegs:$addr, s32_0Imm:$off), "", []>;
+// Vector load pseudos
+let Predicates = [HasV60T, UseHVX], isPseudo = 1, isCodeGenOnly = 1,
+ mayLoad = 1, hasSideEffects = 0 in
+class LDrivv_template<RegisterClass RC, InstHexagon rootInst>
+ : InstHexagon<(outs RC:$dst), (ins IntRegs:$addr, s32_0Imm:$off),
+ "", [], "", rootInst.Itinerary, rootInst.Type>;
-def PS_vloadrw_ai: LDrivv_template<VecDblRegs>,
- Requires<[HasV60T,UseHVXSgl]>;
-def PS_vloadrwu_ai: LDrivv_template<VecDblRegs>,
+def PS_vloadrw_ai: LDrivv_template<VecDblRegs, V6_vL32b_ai>,
Requires<[HasV60T,UseHVXSgl]>;
-def PS_vloadrw_ai_128B: LDrivv_template<VecDblRegs128B>,
+def PS_vloadrw_ai_128B: LDrivv_template<VecDblRegs128B, V6_vL32b_ai_128B>,
Requires<[HasV60T,UseHVXDbl]>;
-def PS_vloadrwu_ai_128B: LDrivv_template<VecDblRegs128B>,
+
+def PS_vloadrwu_ai: LDrivv_template<VecDblRegs, V6_vL32Ub_ai>,
+ Requires<[HasV60T,UseHVXSgl]>;
+def PS_vloadrwu_ai_128B: LDrivv_template<VecDblRegs128B, V6_vL32Ub_ai_128B>,
Requires<[HasV60T,UseHVXDbl]>;
// Store vector predicate pseudo.
@@ -469,25 +453,23 @@ let isExtendable = 1, opExtendable = 1, isExtentSigned = 1, opExtentBits = 13,
Requires<[HasV60T,UseHVXDbl]>;
}
-class VSELInst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = CVI_VA_DV,
- IType type = TypeCVI_VA_DV>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, type>;
-
-let isCodeGenOnly = 1, isPseudo = 1, hasSideEffects = 0 in {
- def PS_vselect: VSELInst<(outs VectorRegs:$dst),
- (ins PredRegs:$src1, VectorRegs:$src2, VectorRegs:$src3), "", []>,
- Requires<[HasV60T,UseHVXSgl]>;
- def PS_vselect_128B: VSELInst<(outs VectorRegs128B:$dst),
- (ins PredRegs:$src1, VectorRegs128B:$src2, VectorRegs128B:$src3),
- "", []>, Requires<[HasV60T,UseHVXDbl]>;
- def PS_wselect: VSELInst<(outs VecDblRegs:$dst),
- (ins PredRegs:$src1, VecDblRegs:$src2, VecDblRegs:$src3), "", []>,
- Requires<[HasV60T,UseHVXSgl]>;
- def PS_wselect_128B: VSELInst<(outs VecDblRegs128B:$dst),
- (ins PredRegs:$src1, VecDblRegs128B:$src2, VecDblRegs128B:$src3),
- "", []>, Requires<[HasV60T,UseHVXDbl]>;
-}
+let isCodeGenOnly = 1, isPseudo = 1, hasSideEffects = 0 in
+class VSELInst<dag outs, dag ins, InstHexagon rootInst>
+ : InstHexagon<outs, ins, "", [], "", rootInst.Itinerary, rootInst.Type>;
+
+def PS_vselect: VSELInst<(outs VectorRegs:$dst),
+ (ins PredRegs:$src1, VectorRegs:$src2, VectorRegs:$src3),
+ V6_vcmov>, Requires<[HasV60T,UseHVXSgl]>;
+def PS_vselect_128B: VSELInst<(outs VectorRegs128B:$dst),
+ (ins PredRegs:$src1, VectorRegs128B:$src2, VectorRegs128B:$src3),
+ V6_vcmov>, Requires<[HasV60T,UseHVXDbl]>;
+
+def PS_wselect: VSELInst<(outs VecDblRegs:$dst),
+ (ins PredRegs:$src1, VecDblRegs:$src2, VecDblRegs:$src3),
+ V6_vccombine>, Requires<[HasV60T,UseHVXSgl]>;
+def PS_wselect_128B: VSELInst<(outs VecDblRegs128B:$dst),
+ (ins PredRegs:$src1, VecDblRegs128B:$src2, VecDblRegs128B:$src3),
+ V6_vccombine>, Requires<[HasV60T,UseHVXDbl]>;
// Store predicate.
let isExtendable = 1, opExtendable = 1, isExtentSigned = 1, opExtentBits = 13,
@@ -504,8 +486,10 @@ def STriw_mod : STInst<(outs),
let isExtendable = 1, opExtendable = 1, opExtentBits = 6,
isAsmParserOnly = 1 in
-def TFRI64_V4 : ALU64_rr<(outs DoubleRegs:$dst), (ins u64_0Imm:$src1),
- "$dst = #$src1">;
+def TFRI64_V4 : InstHexagon<(outs DoubleRegs:$dst),
+ (ins u64_0Imm:$src1),
+ "$dst = #$src1", [], "",
+ A2_combineii.Itinerary, TypeALU32_2op>, OpcodeHexagon;
// Hexagon doesn't have a vector multiply with C semantics.
// Instead, generate a pseudo instruction that gets expaneded into two
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonRegisterInfo.td b/contrib/llvm/lib/Target/Hexagon/HexagonRegisterInfo.td
index 2519b7c40062..45dbb3a6d218 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonRegisterInfo.td
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonRegisterInfo.td
@@ -122,12 +122,6 @@ let Namespace = "Hexagon" in {
def P2 : Rp<2, "p2">, DwarfRegNum<[65]>;
def P3 : Rp<3, "p3">, DwarfRegNum<[66]>;
- // Modifier registers.
- // C6 and C7 can also be M0 and M1, but register names must be unique, even
- // if belonging to different register classes.
- def M0 : Mx<0, "m0">, DwarfRegNum<[72]>;
- def M1 : Mx<1, "m1">, DwarfRegNum<[73]>;
-
// Fake register to represent USR.OVF bit. Artihmetic/saturating instruc-
// tions modify this bit, and multiple such instructions are allowed in the
// same packet. We need to ignore output dependencies on this bit, but not
@@ -149,8 +143,8 @@ let Namespace = "Hexagon" in {
// When defining more Cn registers, make sure to explicitly mark them
// as reserved in HexagonRegisterInfo.cpp.
def C5: Rc<5, "c5", ["c5"]>, DwarfRegNum<[72]>;
- def C6: Rc<6, "c6", [], [M0]>, DwarfRegNum<[73]>;
- def C7: Rc<7, "c7", [], [M1]>, DwarfRegNum<[74]>;
+ def M0: Rc<6, "m0", ["c6"]>, DwarfRegNum<[73]>;
+ def M1: Rc<7, "m1", ["c7"]>, DwarfRegNum<[74]>;
// Define C8 separately and make it aliased with USR.
// The problem is that USR has subregisters (e.g. overflow). If USR was
// specified as a subregister of C9_8, it would imply that subreg_overflow
@@ -177,7 +171,7 @@ let Namespace = "Hexagon" in {
def C1_0: Rcc<0, "c1:0", [SA0, LC0], ["lc0:sa0"]>, DwarfRegNum<[67]>;
def C3_2: Rcc<2, "c3:2", [SA1, LC1], ["lc1:sa1"]>, DwarfRegNum<[69]>;
def C5_4: Rcc<4, "c5:4", [P3_0, C5]>, DwarfRegNum<[71]>;
- def C7_6: Rcc<6, "c7:6", [C6, C7], ["m1:0"]>, DwarfRegNum<[72]>;
+ def C7_6: Rcc<6, "c7:6", [M0, M1], ["m1:0"]>, DwarfRegNum<[72]>;
// Use C8 instead of USR as a subregister of C9_8.
def C9_8: Rcc<8, "c9:8", [C8, PC]>, DwarfRegNum<[74]>;
def C11_10: Rcc<10, "c11:10", [UGP, GP]>, DwarfRegNum<[76]>;
@@ -280,8 +274,8 @@ def ModRegs : RegisterClass<"Hexagon", [i32], 32, (add M0, M1)>;
let Size = 32, isAllocatable = 0 in
def CtrRegs : RegisterClass<"Hexagon", [i32], 32,
- (add LC0, SA0, LC1, SA1, P3_0, C5, C6, C7,
- C8, PC, UGP, GP, CS0, CS1, UPCYCLELO, UPCYCLEHI,
+ (add LC0, SA0, LC1, SA1, P3_0, C5, C8, PC, UGP, GP, CS0, CS1,
+ UPCYCLELO, UPCYCLEHI,
FRAMELIMIT, FRAMEKEY, PKTCOUNTLO, PKTCOUNTHI, UTIMERLO, UTIMERHI,
M0, M1, USR)>;
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonSchedule.td b/contrib/llvm/lib/Target/Hexagon/HexagonSchedule.td
index 9b5fbea04d18..ffee03e72639 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonSchedule.td
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonSchedule.td
@@ -7,6 +7,55 @@
//
//===----------------------------------------------------------------------===//
+def Hex_FWD : Bypass;
+def HVX_FWD : Bypass;
+
+// Functional Units.
+def SLOT0 : FuncUnit;
+def SLOT1 : FuncUnit;
+def SLOT2 : FuncUnit;
+def SLOT3 : FuncUnit;
+// Endloop is a pseudo instruction that is encoded with 2 bits in a packet
+// rather than taking an execution slot. This special unit is needed
+// to schedule an ENDLOOP with 4 other instructions.
+def SLOT_ENDLOOP: FuncUnit;
+
+// CVI pipes from the "Hexagon Multimedia Co-Processor Extensions Arch Spec".
+def CVI_ST : FuncUnit;
+def CVI_XLANE : FuncUnit;
+def CVI_SHIFT : FuncUnit;
+def CVI_MPY0 : FuncUnit;
+def CVI_MPY1 : FuncUnit;
+def CVI_LD : FuncUnit;
+
+// Combined functional units.
+def CVI_XLSHF : FuncUnit;
+def CVI_MPY01 : FuncUnit;
+def CVI_ALL : FuncUnit;
+def CVI_ALL_NOMEM : FuncUnit;
+
+// Combined functional unit data.
+def HexagonComboFuncsV60 :
+ ComboFuncUnits<[
+ ComboFuncData<CVI_XLSHF , [CVI_XLANE, CVI_SHIFT]>,
+ ComboFuncData<CVI_MPY01 , [CVI_MPY0, CVI_MPY1]>,
+ ComboFuncData<CVI_ALL , [CVI_ST, CVI_XLANE, CVI_SHIFT,
+ CVI_MPY0, CVI_MPY1, CVI_LD]>,
+ ComboFuncData<CVI_ALL_NOMEM, [CVI_XLANE, CVI_SHIFT, CVI_MPY0, CVI_MPY1]>
+ ]>;
+
+// Itinerary classes.
+def PSEUDO : InstrItinClass;
+def PSEUDOM : InstrItinClass;
+def DUPLEX : InstrItinClass;
+def tc_ENDLOOP : InstrItinClass;
+
+//===----------------------------------------------------------------------===//
+// Auto-generated itinerary classes
+//===----------------------------------------------------------------------===//
+include "HexagonDepIICScalar.td"
+include "HexagonDepIICHVX.td"
+
//===----------------------------------------------------------------------===//
// V4 Machine Info +
//===----------------------------------------------------------------------===//
@@ -20,9 +69,9 @@ include "HexagonScheduleV55.td"
// V60 Machine Info -
//===----------------------------------------------------------------------===//
-include "HexagonScheduleV60.td"
include "HexagonIICScalar.td"
include "HexagonIICHVX.td"
+include "HexagonScheduleV60.td"
//===----------------------------------------------------------------------===//
// V62 Machine Info +
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonScheduleV4.td b/contrib/llvm/lib/Target/Hexagon/HexagonScheduleV4.td
index 880cc0a02b6a..69b704a805b8 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonScheduleV4.td
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonScheduleV4.td
@@ -7,200 +7,31 @@
//
//===----------------------------------------------------------------------===//
-// There are four SLOTS (four parallel pipelines) in Hexagon V4 machine.
-// This file describes that machine information.
-
-//
-// |===========|==================================================|
-// | PIPELINE | Instruction Classes |
-// |===========|==================================================|
-// | SLOT0 | LD ST ALU32 MEMOP NV SYSTEM |
-// |-----------|--------------------------------------------------|
-// | SLOT1 | LD ST ALU32 |
-// |-----------|--------------------------------------------------|
-// | SLOT2 | XTYPE ALU32 J JR |
-// |-----------|--------------------------------------------------|
-// | SLOT3 | XTYPE ALU32 J CR |
-// |===========|==================================================|
-
-// Functional Units.
-def SLOT0 : FuncUnit;
-def SLOT1 : FuncUnit;
-def SLOT2 : FuncUnit;
-def SLOT3 : FuncUnit;
-// Endloop is a pseudo instruction that is encoded with 2 bits in a packet
-// rather than taking an execution slot. This special unit is needed
-// to schedule an ENDLOOP with 4 other instructions.
-def SLOT_ENDLOOP: FuncUnit;
-
-// Itinerary classes.
-def PSEUDO : InstrItinClass;
-def PSEUDOM : InstrItinClass;
-// ALU64/M/S Instruction classes of V2 are collectively knownn as XTYPE in V4.
-def DUPLEX : InstrItinClass;
-def PREFIX : InstrItinClass;
-def COMPOUND_CJ_ARCHDEPSLOT : InstrItinClass;
-def COMPOUND : InstrItinClass;
+def LD_tc_ld_SLOT01 : InstrItinClass;
+def ST_tc_st_SLOT01 : InstrItinClass;
+
+class HexagonV4PseudoItin {
+ list<InstrItinData> V4PseudoItin_list = [
+ InstrItinData<PSEUDO, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData<PSEUDOM, [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData<DUPLEX, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData<tc_ENDLOOP, [InstrStage<1, [SLOT_ENDLOOP]>]>
+ ];
+}
-def ALU32_2op_tc_1_SLOT0123 : InstrItinClass;
-def ALU32_2op_tc_2early_SLOT0123 : InstrItinClass;
-def ALU32_3op_tc_2early_SLOT0123 : InstrItinClass;
-def ALU32_3op_tc_1_SLOT0123 : InstrItinClass;
-def ALU32_3op_tc_2_SLOT0123 : InstrItinClass;
-def ALU32_ADDI_tc_1_SLOT0123 : InstrItinClass;
-def ALU64_tc_1_SLOT23 : InstrItinClass;
-def ALU64_tc_2_SLOT23 : InstrItinClass;
-def ALU64_tc_2early_SLOT23 : InstrItinClass;
-def ALU64_tc_3x_SLOT23 : InstrItinClass;
-def CR_tc_2_SLOT3 : InstrItinClass;
-def CR_tc_2early_SLOT23 : InstrItinClass;
-def CR_tc_2early_SLOT3 : InstrItinClass;
-def CR_tc_3x_SLOT23 : InstrItinClass;
-def CR_tc_3x_SLOT3 : InstrItinClass;
-def J_tc_2early_SLOT23 : InstrItinClass;
-def J_tc_2early_CJUMP_UCJUMP_ARCHDEPSLOT : InstrItinClass;
-def J_tc_2early_SLOT2 : InstrItinClass;
-def LD_tc_ld_SLOT01 : InstrItinClass;
-def LD_tc_ld_pi_SLOT01 : InstrItinClass;
-def LD_tc_ld_SLOT0 : InstrItinClass;
-def LD_tc_3or4stall_SLOT0 : InstrItinClass;
-def M_tc_2_SLOT23 : InstrItinClass;
-def M_tc_2_acc_SLOT23 : InstrItinClass;
-def M_tc_3_SLOT23 : InstrItinClass;
-def M_tc_1_SLOT23 : InstrItinClass;
-def M_tc_3x_SLOT23 : InstrItinClass;
-def M_tc_3x_acc_SLOT23 : InstrItinClass;
-def M_tc_3or4x_SLOT23 : InstrItinClass;
-def M_tc_3or4x_acc_SLOT23 : InstrItinClass;
-def ST_tc_st_SLOT01 : InstrItinClass;
-def ST_tc_st_pi_SLOT01 : InstrItinClass;
-def ST_tc_st_SLOT0 : InstrItinClass;
-def ST_tc_st_pi_SLOT0 : InstrItinClass;
-def ST_tc_ld_SLOT0 : InstrItinClass;
-def ST_tc_3stall_SLOT0 : InstrItinClass;
-def S_2op_tc_1_SLOT23 : InstrItinClass;
-def S_2op_tc_2_SLOT23 : InstrItinClass;
-def S_2op_tc_2early_SLOT23 : InstrItinClass;
-def S_2op_tc_3or4x_SLOT23 : InstrItinClass;
-def S_3op_tc_1_SLOT23 : InstrItinClass;
-def S_3op_tc_2_SLOT23 : InstrItinClass;
-def S_3op_tc_2early_SLOT23 : InstrItinClass;
-def S_3op_tc_3_SLOT23 : InstrItinClass;
-def S_3op_tc_3x_SLOT23 : InstrItinClass;
-def NCJ_tc_3or4stall_SLOT0 : InstrItinClass;
-def V2LDST_tc_ld_SLOT01 : InstrItinClass;
-def V2LDST_tc_st_SLOT0 : InstrItinClass;
-def V2LDST_tc_st_SLOT01 : InstrItinClass;
-def V4LDST_tc_ld_SLOT01 : InstrItinClass;
-def V4LDST_tc_st_SLOT0 : InstrItinClass;
-def V4LDST_tc_st_SLOT01 : InstrItinClass;
-def J_tc_2early_SLOT0123 : InstrItinClass;
-def EXTENDER_tc_1_SLOT0123 : InstrItinClass;
-def S_3op_tc_3stall_SLOT23 : InstrItinClass;
+def HexagonV4ItinList : DepScalarItinV4, HexagonV4PseudoItin {
+ list<InstrItinData> V4Itin_list = [
+ InstrItinData<LD_tc_ld_SLOT01, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData<ST_tc_st_SLOT01, [InstrStage<1, [SLOT0, SLOT1]>]>
+ ];
+ list<InstrItinData> ItinList =
+ !listconcat(V4Itin_list, DepScalarItinV4_list, V4PseudoItin_list);
+}
def HexagonItinerariesV4 :
- ProcessorItineraries<[SLOT0, SLOT1, SLOT2, SLOT3, SLOT_ENDLOOP], [], [
- // ALU32
- InstrItinData<ALU32_2op_tc_1_SLOT0123 ,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
- InstrItinData<ALU32_2op_tc_2early_SLOT0123,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
- InstrItinData<ALU32_3op_tc_1_SLOT0123 ,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
- InstrItinData<ALU32_3op_tc_2early_SLOT0123,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
- InstrItinData<ALU32_3op_tc_2_SLOT0123 ,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
- InstrItinData<ALU32_ADDI_tc_1_SLOT0123 ,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
-
- // ALU64
- InstrItinData<ALU64_tc_1_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<ALU64_tc_2_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<ALU64_tc_2early_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<ALU64_tc_3x_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
-
- // CR -> System
- InstrItinData<CR_tc_2_SLOT3 , [InstrStage<1, [SLOT3]>]>,
- InstrItinData<CR_tc_2early_SLOT3 , [InstrStage<1, [SLOT3]>]>,
- InstrItinData<CR_tc_3x_SLOT3 , [InstrStage<1, [SLOT3]>]>,
-
- // Jump (conditional/unconditional/return etc)
- // CR
- InstrItinData<CR_tc_2early_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<CR_tc_3x_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- // J
- InstrItinData<J_tc_2early_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<J_tc_2early_CJUMP_UCJUMP_ARCHDEPSLOT , [InstrStage<1, [SLOT2, SLOT3]>]>,
- // JR
- InstrItinData<J_tc_2early_SLOT2 , [InstrStage<1, [SLOT2]>]>,
-
- //Load
- InstrItinData<LD_tc_ld_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>]>,
- InstrItinData<LD_tc_ld_pi_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>]>,
- InstrItinData<LD_tc_ld_SLOT0 , [InstrStage<1, [SLOT0]>]>,
- InstrItinData<LD_tc_3or4stall_SLOT0 , [InstrStage<1, [SLOT0]>]>,
-
- // M
- InstrItinData<M_tc_1_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<M_tc_2_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<M_tc_2_acc_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<M_tc_3_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<M_tc_3x_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<M_tc_3x_acc_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<M_tc_3or4x_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<M_tc_3or4x_acc_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
-
- // Store
- // ST
- InstrItinData<ST_tc_st_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>]>,
- InstrItinData<ST_tc_st_pi_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>]>,
- // ST0
- InstrItinData<ST_tc_st_SLOT0 , [InstrStage<1, [SLOT0]>]>,
- InstrItinData<ST_tc_st_pi_SLOT0 , [InstrStage<1, [SLOT0]>]>,
- InstrItinData<ST_tc_ld_SLOT0 , [InstrStage<1, [SLOT0]>]>,
-
- // S
- InstrItinData<S_2op_tc_1_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<S_2op_tc_2_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<S_2op_tc_2early_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<S_2op_tc_3or4x_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<S_3op_tc_1_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<S_3op_tc_2early_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<S_3op_tc_2_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<S_3op_tc_3_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<S_3op_tc_3x_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<S_3op_tc_3stall_SLOT23 , [InstrStage<3, [SLOT2, SLOT3]>]>,
-
- // SYS
- InstrItinData<ST_tc_3stall_SLOT0 , [InstrStage<1, [SLOT0]>]>,
-
- // New Value Compare Jump
- InstrItinData<NCJ_tc_3or4stall_SLOT0 , [InstrStage<1, [SLOT0]>]>,
-
- // Mem ops - MEM_V4
- InstrItinData<V2LDST_tc_st_SLOT0 , [InstrStage<1, [SLOT0]>]>,
- InstrItinData<V2LDST_tc_ld_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>]>,
- InstrItinData<V2LDST_tc_st_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>]>,
- InstrItinData<V4LDST_tc_st_SLOT0 , [InstrStage<1, [SLOT0]>]>,
- InstrItinData<V4LDST_tc_ld_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>]>,
- InstrItinData<V4LDST_tc_st_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>]>,
-
- InstrItinData<DUPLEX , [InstrStage<1, [SLOT0]>]>,
-
- // ENDLOOP
- InstrItinData<J_tc_2early_SLOT0123 , [InstrStage<1, [SLOT_ENDLOOP]>]>,
-
- // Extender/PREFIX
- InstrItinData<EXTENDER_tc_1_SLOT0123,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
-
- InstrItinData<COMPOUND_CJ_ARCHDEPSLOT , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<COMPOUND , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<PSEUDO , [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
- InstrItinData<PSEUDOM, [InstrStage<1, [SLOT2, SLOT3], 0>,
- InstrStage<1, [SLOT2, SLOT3]>]>
- ]>;
+ ProcessorItineraries<[SLOT0, SLOT1, SLOT2, SLOT3, SLOT_ENDLOOP],
+ [Hex_FWD], HexagonV4ItinList.ItinList>;
def HexagonModelV4 : SchedMachineModel {
// Max issue per cycle == bundle width.
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonScheduleV55.td b/contrib/llvm/lib/Target/Hexagon/HexagonScheduleV55.td
index 06cbcb16abb7..ca738be5d6ef 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonScheduleV55.td
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonScheduleV55.td
@@ -1,4 +1,4 @@
-//=-HexagonScheduleV4.td - HexagonV4 Scheduling Definitions --*- tablegen -*-=//
+//=-HexagonScheduleV55.td - HexagonV55 Scheduling Definitions -*- tablegen -*=//
//
// The LLVM Compiler Infrastructure
//
@@ -7,190 +7,33 @@
//
//===----------------------------------------------------------------------===//
-// There are four SLOTS (four parallel pipelines) in Hexagon V4 machine.
-// This file describes that machine information.
-//
-// |===========|==================================================|
-// | PIPELINE | Instruction Classes |
-// |===========|==================================================|
-// | SLOT0 | LD ST ALU32 MEMOP NV SYSTEM |
-// |-----------|--------------------------------------------------|
-// | SLOT1 | LD ST ALU32 |
-// |-----------|--------------------------------------------------|
-// | SLOT2 | XTYPE ALU32 J JR |
-// |-----------|--------------------------------------------------|
-// | SLOT3 | XTYPE ALU32 J CR |
-// |===========|==================================================|
+class HexagonV55PseudoItin {
+ list<InstrItinData> V55PseudoItin_list = [
+ InstrItinData<PSEUDO, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>],
+ [1, 1, 1]>,
+ InstrItinData<PSEUDOM, [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [SLOT2, SLOT3]>], [1, 1, 1]>,
+ InstrItinData<DUPLEX, [InstrStage<1, [SLOT0]>], [1, 1, 1]>,
+ InstrItinData<tc_ENDLOOP, [InstrStage<1, [SLOT_ENDLOOP]>], [2]>
+ ];
+}
-def CJ_tc_1_SLOT23 : InstrItinClass;
-def CJ_tc_2early_SLOT23 : InstrItinClass;
-def COPROC_VMEM_vtc_long_SLOT01 : InstrItinClass;
-def COPROC_VX_vtc_long_SLOT23 : InstrItinClass;
-def COPROC_VX_vtc_SLOT23 : InstrItinClass;
-def J_tc_3stall_SLOT2 : InstrItinClass;
-def MAPPING_tc_1_SLOT0123 : InstrItinClass;
-def M_tc_3stall_SLOT23 : InstrItinClass;
+def HexagonV55ItinList : DepScalarItinV55,
+ HexagonV55PseudoItin {
+ list<InstrItinData> V55Itin_list = [
+ InstrItinData<LD_tc_ld_SLOT01, [InstrStage<1, [SLOT0, SLOT1]>], [2, 1]>,
+ InstrItinData<ST_tc_st_SLOT01, [InstrStage<1, [SLOT0, SLOT1]>],
+ [1, 1, 1]>
+ ];
+ list<InstrItinData> ItinList =
+ !listconcat(V55Itin_list, DepScalarItinV55_list,
+ V55PseudoItin_list);
+}
def HexagonItinerariesV55 :
- ProcessorItineraries<[SLOT0, SLOT1, SLOT2, SLOT3, SLOT_ENDLOOP], [], [
- // ALU32
- InstrItinData<ALU32_2op_tc_1_SLOT0123 ,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [1, 1, 1]>,
- InstrItinData<ALU32_2op_tc_2early_SLOT0123,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [2, 1, 1]>,
- InstrItinData<ALU32_3op_tc_1_SLOT0123 ,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [1, 1, 1]>,
- InstrItinData<ALU32_3op_tc_2_SLOT0123 ,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [2, 1, 1]>,
- InstrItinData<ALU32_3op_tc_2early_SLOT0123,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [2, 1, 1]>,
- InstrItinData<ALU32_ADDI_tc_1_SLOT0123 ,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [1, 1, 1]>,
-
- // ALU64
- InstrItinData<ALU64_tc_1_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [1, 1, 1]>,
- InstrItinData<ALU64_tc_2_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [2, 1, 1]>,
- InstrItinData<ALU64_tc_2early_SLOT23, [InstrStage<1, [SLOT2, SLOT3]>],
- [2, 1, 1]>,
- InstrItinData<ALU64_tc_3x_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [3, 1, 1]>,
-
- // CR -> System
- InstrItinData<CR_tc_2_SLOT3 , [InstrStage<1, [SLOT3]>], [2, 1, 1]>,
- InstrItinData<CR_tc_2early_SLOT3 , [InstrStage<1, [SLOT3]>], [2, 1, 1]>,
- InstrItinData<CR_tc_3x_SLOT3 , [InstrStage<1, [SLOT3]>], [3, 1, 1]>,
-
- // Jump (conditional/unconditional/return etc)
- InstrItinData<CR_tc_2early_SLOT23, [InstrStage<1, [SLOT2, SLOT3]>],
- [2, 1, 1, 1]>,
- InstrItinData<CR_tc_3x_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [3, 1, 1, 1]>,
- InstrItinData<CJ_tc_1_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [1, 1, 1, 1]>,
- InstrItinData<CJ_tc_2early_SLOT23, [InstrStage<1, [SLOT2, SLOT3]>],
- [2, 1, 1, 1]>,
- InstrItinData<J_tc_2early_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [2, 1, 1, 1]>,
- InstrItinData<J_tc_2early_CJUMP_UCJUMP_ARCHDEPSLOT,
- [InstrStage<1, [SLOT2, SLOT3]>], [2, 1, 1, 1]>,
-
- // JR
- InstrItinData<J_tc_2early_SLOT2 , [InstrStage<1, [SLOT2]>], [2, 1, 1]>,
- InstrItinData<J_tc_3stall_SLOT2 , [InstrStage<1, [SLOT2]>], [3, 1, 1]>,
-
- // Extender
- InstrItinData<EXTENDER_tc_1_SLOT0123,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [1, 1, 1]>,
-
- // Load
- InstrItinData<LD_tc_ld_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>],
- [2, 1]>,
- InstrItinData<LD_tc_ld_pi_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>],
- [2, 1]>,
- InstrItinData<LD_tc_3or4stall_SLOT0, [InstrStage<1, [SLOT0]>], [2, 1]>,
- InstrItinData<LD_tc_ld_SLOT0 , [InstrStage<1, [SLOT0]>], [2, 1]>,
-
- // M
- InstrItinData<M_tc_1_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [1, 1, 1]>,
- InstrItinData<M_tc_2_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [2, 1, 1]>,
- InstrItinData<M_tc_2_acc_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [2, 1, 1]>,
- InstrItinData<M_tc_3_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [1, 1, 1]>,
- InstrItinData<M_tc_3x_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [3, 1, 1]>,
- InstrItinData<M_tc_3x_acc_SLOT23, [InstrStage<1, [SLOT2, SLOT3]>],
- [3, 1, 1, 1]>,
- InstrItinData<M_tc_3or4x_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [3, 1, 1]>,
- InstrItinData<M_tc_3or4x_acc_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [3, 1, 1]>,
- InstrItinData<M_tc_3stall_SLOT23, [InstrStage<1, [SLOT2, SLOT3]>],
- [3, 1, 1]>,
-
- // Store
- InstrItinData<ST_tc_st_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>],
- [1, 1, 1]>,
- InstrItinData<ST_tc_st_pi_SLOT01, [InstrStage<1, [SLOT0, SLOT1]>],
- [1, 1, 1]>,
- InstrItinData<ST_tc_3stall_SLOT0, [InstrStage<1, [SLOT0]>], [2, 1, 1]>,
- InstrItinData<ST_tc_ld_SLOT0 , [InstrStage<1, [SLOT0]>], [2, 1, 1]>,
- InstrItinData<ST_tc_st_SLOT0 , [InstrStage<1, [SLOT0]>], [1, 1, 1]>,
- InstrItinData<ST_tc_st_pi_SLOT0 , [InstrStage<1, [SLOT0]>], [1, 1, 1]>,
-
- // S
- InstrItinData<S_2op_tc_1_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [1, 1, 1]>,
- InstrItinData<S_2op_tc_2_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [2, 1, 1]>,
- InstrItinData<S_2op_tc_2early_SLOT23, [InstrStage<1, [SLOT2, SLOT3]>],
- [2, 1, 1]>,
- InstrItinData<S_2op_tc_3or4x_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [3, 1, 1]>,
- InstrItinData<S_3op_tc_1_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [1, 1, 1]>,
- InstrItinData<S_3op_tc_2_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [2, 1, 1]>,
- InstrItinData<S_3op_tc_2early_SLOT23, [InstrStage<1, [SLOT2, SLOT3]>],
- [2, 1, 1]>,
- InstrItinData<S_3op_tc_3_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [3, 1, 1]>,
- InstrItinData<S_3op_tc_3stall_SLOT23, [InstrStage<1, [SLOT2, SLOT3]>],
- [3, 1, 1]>,
- InstrItinData<S_3op_tc_3x_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [3, 1, 1]>,
-
- // New Value Compare Jump
- InstrItinData<NCJ_tc_3or4stall_SLOT0, [InstrStage<1, [SLOT0]>],
- [3, 1, 1, 1]>,
-
- // Mem ops
- InstrItinData<V2LDST_tc_st_SLOT0 , [InstrStage<1, [SLOT0]>],
- [1, 1, 1, 1]>,
- InstrItinData<V2LDST_tc_ld_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>],
- [2, 1, 1, 1]>,
- InstrItinData<V2LDST_tc_st_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>],
- [1, 1, 1, 1]>,
- InstrItinData<V4LDST_tc_st_SLOT0 , [InstrStage<1, [SLOT0]>],
- [1, 1, 1, 1]>,
- InstrItinData<V4LDST_tc_ld_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>],
- [3, 1, 1, 1]>,
- InstrItinData<V4LDST_tc_st_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>],
- [1, 1, 1, 1]>,
-
- // Endloop
- InstrItinData<J_tc_2early_SLOT0123, [InstrStage<1, [SLOT_ENDLOOP]>],
- [2]>,
-
- // Vector
- InstrItinData<COPROC_VMEM_vtc_long_SLOT01,
- [InstrStage<1, [SLOT0, SLOT1]>], [2, 1, 1, 1]>,
- InstrItinData<COPROC_VX_vtc_long_SLOT23 ,
- [InstrStage<1, [SLOT2, SLOT3]>], [3, 1, 1, 1]>,
- InstrItinData<COPROC_VX_vtc_SLOT23 ,
- [InstrStage<1, [SLOT2, SLOT3]>], [3, 1, 1, 1]>,
- InstrItinData<MAPPING_tc_1_SLOT0123 ,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>],
- [1, 1, 1, 1]>,
-
- // Misc
- InstrItinData<COMPOUND_CJ_ARCHDEPSLOT , [InstrStage<1, [SLOT2, SLOT3]>],
- [1, 1, 1]>,
- InstrItinData<COMPOUND , [InstrStage<1, [SLOT2, SLOT3]>],
- [1, 1, 1]>,
- InstrItinData<DUPLEX , [InstrStage<1, [SLOT0]>], [1, 1, 1]>,
- InstrItinData<PREFIX , [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>],
- [1, 1, 1]>,
- InstrItinData<PSEUDO , [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>],
- [1, 1, 1]>,
- InstrItinData<PSEUDOM, [InstrStage<1, [SLOT2, SLOT3], 0>,
- InstrStage<1, [SLOT2, SLOT3]>], [1, 1, 1]>
- ]>;
+ ProcessorItineraries<[SLOT0, SLOT1, SLOT2, SLOT3, SLOT_ENDLOOP],
+ [Hex_FWD], HexagonV55ItinList.ItinList>;
def HexagonModelV55 : SchedMachineModel {
// Max issue per cycle == bundle width.
@@ -201,5 +44,5 @@ def HexagonModelV55 : SchedMachineModel {
}
//===----------------------------------------------------------------------===//
-// Hexagon V4 Resource Definitions -
+// Hexagon V55 Resource Definitions -
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonScheduleV60.td b/contrib/llvm/lib/Target/Hexagon/HexagonScheduleV60.td
index 63784710f52b..a2544c92a72c 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonScheduleV60.td
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonScheduleV60.td
@@ -7,61 +7,6 @@
//
//===----------------------------------------------------------------------===//
-// CVI pipes from the "Hexagon Multimedia Co-Processor Extensions Arch Spec".
-def CVI_ST : FuncUnit;
-def CVI_XLANE : FuncUnit;
-def CVI_SHIFT : FuncUnit;
-def CVI_MPY0 : FuncUnit;
-def CVI_MPY1 : FuncUnit;
-def CVI_LD : FuncUnit;
-
-// Combined functional units.
-def CVI_XLSHF : FuncUnit;
-def CVI_MPY01 : FuncUnit;
-def CVI_ALL : FuncUnit;
-def CVI_XLMPY0 : FuncUnit;
-def CVI_SHFMPY1: FuncUnit;
-
-// Combined functional unit data.
-def HexagonComboFuncsV60 :
- ComboFuncUnits<[
- ComboFuncData<CVI_XLSHF , [CVI_XLANE, CVI_SHIFT]>,
- ComboFuncData<CVI_MPY01 , [CVI_MPY0, CVI_MPY1]>,
- ComboFuncData<CVI_ALL , [CVI_ST, CVI_XLANE, CVI_SHIFT,
- CVI_MPY0, CVI_MPY1, CVI_LD]>,
- ComboFuncData<CVI_XLMPY0 , [CVI_XLANE, CVI_MPY0]>,
- ComboFuncData<CVI_SHFMPY1 , [CVI_SHIFT, CVI_MPY1]>
- ]>;
-
-// Note: When adding additional vector scheduling classes, add the
-// corresponding methods to the class HexagonInstrInfo.
-def CVI_VA : InstrItinClass;
-def CVI_VA_DV : InstrItinClass;
-def CVI_VX_LONG : InstrItinClass;
-def CVI_VX_LATE : InstrItinClass;
-def CVI_VX : InstrItinClass;
-def CVI_VX_DV_LONG : InstrItinClass;
-def CVI_VX_DV : InstrItinClass;
-def CVI_VX_DV_SLOT2 : InstrItinClass;
-def CVI_VX_DV_SLOT2_LONG_EARLY : InstrItinClass;
-def CVI_VP : InstrItinClass;
-def CVI_VP_LONG : InstrItinClass;
-def CVI_VP_VS_EARLY : InstrItinClass;
-def CVI_VP_VS_LONG_EARLY : InstrItinClass;
-def CVI_VP_VS_LONG : InstrItinClass;
-def CVI_VP_VS : InstrItinClass;
-def CVI_VP_DV : InstrItinClass;
-def CVI_VS : InstrItinClass;
-def CVI_VINLANESAT : InstrItinClass;
-def CVI_VM_LD : InstrItinClass;
-def CVI_VM_TMP_LD : InstrItinClass;
-def CVI_VM_CUR_LD : InstrItinClass;
-def CVI_VM_VP_LDU : InstrItinClass;
-def CVI_VM_ST : InstrItinClass;
-def CVI_VM_NEW_ST : InstrItinClass;
-def CVI_VM_STU : InstrItinClass;
-def CVI_HIST : InstrItinClass;
-def CVI_VA_EXT : InstrItinClass;
// There are four SLOTS (four parallel pipelines) in Hexagon V60 machine.
// This file describes that machine information.
@@ -108,196 +53,20 @@ def CVI_VA_EXT : InstrItinClass;
// S0123| CVI_VA_EXT Extract |
// |=====================================================================|
+def HexagonV60ItinList : DepScalarItinV60, ScalarItin,
+ DepHVXItinV60,
+ HVXItin, PseudoItin {
+ list<InstrItinData> ItinList =
+ !listconcat(DepScalarItinV60_list, ScalarItin_list,
+ DepHVXItinV60_list, HVXItin_list, PseudoItin_list);
+}
+
def HexagonItinerariesV60 :
ProcessorItineraries<[SLOT0, SLOT1, SLOT2, SLOT3, SLOT_ENDLOOP,
CVI_ST, CVI_XLANE, CVI_SHIFT, CVI_MPY0, CVI_MPY1,
- CVI_LD, CVI_XLSHF, CVI_MPY01, CVI_ALL], [], [
- // ALU32
- InstrItinData<ALU32_2op_tc_1_SLOT0123 ,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
- InstrItinData<ALU32_2op_tc_2early_SLOT0123,
- [InstrStage<2, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
- InstrItinData<ALU32_3op_tc_1_SLOT0123 ,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
- InstrItinData<ALU32_3op_tc_2_SLOT0123 ,
- [InstrStage<2, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
- InstrItinData<ALU32_3op_tc_2early_SLOT0123,
- [InstrStage<2, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
- InstrItinData<ALU32_ADDI_tc_1_SLOT0123 ,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
-
- // ALU64
- InstrItinData<ALU64_tc_1_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<ALU64_tc_2_SLOT23 , [InstrStage<2, [SLOT2, SLOT3]>]>,
- InstrItinData<ALU64_tc_2early_SLOT23, [InstrStage<2, [SLOT2, SLOT3]>]>,
- InstrItinData<ALU64_tc_3x_SLOT23 , [InstrStage<3, [SLOT2, SLOT3]>]>,
-
- // CR -> System
- InstrItinData<CR_tc_2_SLOT3 , [InstrStage<2, [SLOT3]>]>,
- InstrItinData<CR_tc_2early_SLOT3 , [InstrStage<2, [SLOT3]>]>,
- InstrItinData<CR_tc_3x_SLOT3 , [InstrStage<3, [SLOT3]>]>,
-
- // Jump (conditional/unconditional/return etc)
- InstrItinData<CR_tc_2early_SLOT23, [InstrStage<2, [SLOT2, SLOT3]>]>,
- InstrItinData<CR_tc_3x_SLOT23 , [InstrStage<3, [SLOT2, SLOT3]>]>,
- InstrItinData<CJ_tc_1_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<CJ_tc_2early_SLOT23, [InstrStage<2, [SLOT2, SLOT3]>]>,
- InstrItinData<J_tc_2early_SLOT23 , [InstrStage<2, [SLOT2, SLOT3]>]>,
- InstrItinData<J_tc_2early_CJUMP_UCJUMP_ARCHDEPSLOT , [InstrStage<1, [SLOT2, SLOT3]>]>,
-
- // JR
- InstrItinData<J_tc_2early_SLOT2 , [InstrStage<2, [SLOT2]>]>,
- InstrItinData<J_tc_3stall_SLOT2 , [InstrStage<3, [SLOT2]>]>,
-
- // Extender
- InstrItinData<EXTENDER_tc_1_SLOT0123, [InstrStage<1,
- [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
-
- // Load
- InstrItinData<LD_tc_ld_SLOT01 , [InstrStage<3, [SLOT0, SLOT1]>]>,
- InstrItinData<LD_tc_ld_pi_SLOT01 , [InstrStage<3, [SLOT0, SLOT1]>]>,
- InstrItinData<LD_tc_3or4stall_SLOT0, [InstrStage<4, [SLOT0]>]>,
- InstrItinData<LD_tc_ld_SLOT0 , [InstrStage<3, [SLOT0]>]>,
-
- // M
- InstrItinData<M_tc_1_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<M_tc_2_SLOT23 , [InstrStage<2, [SLOT2, SLOT3]>]>,
- InstrItinData<M_tc_2_acc_SLOT23 , [InstrStage<2, [SLOT2, SLOT3]>]>,
- InstrItinData<M_tc_3_SLOT23 , [InstrStage<3, [SLOT2, SLOT3]>]>,
- InstrItinData<M_tc_3x_SLOT23 , [InstrStage<3, [SLOT2, SLOT3]>]>,
- InstrItinData<M_tc_3x_acc_SLOT23, [InstrStage<3, [SLOT2, SLOT3]>]>,
- InstrItinData<M_tc_3or4x_SLOT23 , [InstrStage<4, [SLOT2, SLOT3]>]>,
- InstrItinData<M_tc_3or4x_acc_SLOT23 , [InstrStage<4, [SLOT2, SLOT3]>]>,
- InstrItinData<M_tc_3stall_SLOT23, [InstrStage<3, [SLOT2, SLOT3]>]>,
-
- // Store
- InstrItinData<ST_tc_st_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>]>,
- InstrItinData<ST_tc_st_pi_SLOT01, [InstrStage<1, [SLOT0, SLOT1]>]>,
- InstrItinData<ST_tc_3stall_SLOT0, [InstrStage<3, [SLOT0]>]>,
- InstrItinData<ST_tc_ld_SLOT0 , [InstrStage<3, [SLOT0]>]>,
- InstrItinData<ST_tc_st_SLOT0 , [InstrStage<1, [SLOT0]>]>,
- InstrItinData<ST_tc_st_pi_SLOT0 , [InstrStage<1, [SLOT0]>]>,
-
- // S
- InstrItinData<S_2op_tc_1_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<S_2op_tc_2_SLOT23 , [InstrStage<2, [SLOT2, SLOT3]>]>,
- InstrItinData<S_2op_tc_2early_SLOT23, [InstrStage<2, [SLOT2, SLOT3]>]>,
- // The S_2op_tc_3x_SLOT23 slots are 4 cycles on v60.
- InstrItinData<S_2op_tc_3or4x_SLOT23 , [InstrStage<4, [SLOT2, SLOT3]>]>,
- InstrItinData<S_3op_tc_1_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<S_3op_tc_2_SLOT23 , [InstrStage<2, [SLOT2, SLOT3]>]>,
- InstrItinData<S_3op_tc_2early_SLOT23, [InstrStage<2, [SLOT2, SLOT3]>]>,
- InstrItinData<S_3op_tc_3_SLOT23 , [InstrStage<3, [SLOT2, SLOT3]>]>,
- InstrItinData<S_3op_tc_3stall_SLOT23, [InstrStage<3, [SLOT2, SLOT3]>]>,
- InstrItinData<S_3op_tc_3x_SLOT23 , [InstrStage<3, [SLOT2, SLOT3]>]>,
-
- // New Value Compare Jump
- InstrItinData<NCJ_tc_3or4stall_SLOT0, [InstrStage<4, [SLOT0]>]>,
-
- // Mem ops
- InstrItinData<V2LDST_tc_st_SLOT0 , [InstrStage<1, [SLOT0]>]>,
- InstrItinData<V2LDST_tc_ld_SLOT01 , [InstrStage<2, [SLOT0, SLOT1]>]>,
- InstrItinData<V2LDST_tc_st_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>]>,
- InstrItinData<V4LDST_tc_st_SLOT0 , [InstrStage<1, [SLOT0]>]>,
- InstrItinData<V4LDST_tc_ld_SLOT01 , [InstrStage<3, [SLOT0, SLOT1]>]>,
- InstrItinData<V4LDST_tc_st_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>]>,
-
- // Endloop
- InstrItinData<J_tc_2early_SLOT0123, [InstrStage<2, [SLOT_ENDLOOP]>]>,
-
- // Vector
- InstrItinData<COPROC_VMEM_vtc_long_SLOT01,
- [InstrStage<3, [SLOT0, SLOT1]>]>,
- InstrItinData<COPROC_VX_vtc_long_SLOT23 ,
- [InstrStage<3, [SLOT2, SLOT3]>]>,
- InstrItinData<COPROC_VX_vtc_SLOT23 ,
- [InstrStage<3, [SLOT2, SLOT3]>]>,
- InstrItinData<MAPPING_tc_1_SLOT0123 ,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
-
- // Duplex and Compound
- InstrItinData<DUPLEX , [InstrStage<1, [SLOT0]>]>,
- InstrItinData<COMPOUND_CJ_ARCHDEPSLOT , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<COMPOUND , [InstrStage<1, [SLOT2, SLOT3]>]>,
- // Misc
- InstrItinData<PREFIX , [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
- InstrItinData<PSEUDO , [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
- InstrItinData<PSEUDOM , [InstrStage<1, [SLOT2, SLOT3], 0>,
- InstrStage<1, [SLOT2, SLOT3]>]>,
-
- // Latest CVI spec definitions.
- InstrItinData<CVI_VA,[InstrStage<1, [SLOT0,SLOT1,SLOT2,SLOT3], 0>,
- InstrStage<1, [CVI_XLANE,CVI_SHIFT,
- CVI_MPY0, CVI_MPY1]>]>,
- InstrItinData<CVI_VA_DV,
- [InstrStage<1, [SLOT0,SLOT1,SLOT2,SLOT3], 0>,
- InstrStage<1, [CVI_XLSHF, CVI_MPY01]>]>,
- InstrItinData<CVI_VX_LONG, [InstrStage<1, [SLOT2, SLOT3], 0>,
- InstrStage<1, [CVI_MPY0, CVI_MPY1]>]>,
- InstrItinData<CVI_VX_LATE, [InstrStage<1, [SLOT2, SLOT3], 0>,
- InstrStage<1, [CVI_MPY0, CVI_MPY1]>]>,
- InstrItinData<CVI_VX,[InstrStage<1, [SLOT2, SLOT3], 0>,
- InstrStage<1, [CVI_MPY0, CVI_MPY1]>]>,
- InstrItinData<CVI_VX_DV_LONG,
- [InstrStage<1, [SLOT2, SLOT3], 0>,
- InstrStage<1, [CVI_MPY01]>]>,
- InstrItinData<CVI_VX_DV,
- [InstrStage<1, [SLOT2, SLOT3], 0>,
- InstrStage<1, [CVI_MPY01]>]>,
- InstrItinData<CVI_VX_DV_SLOT2,
- [InstrStage<1, [SLOT2], 0>,
- InstrStage<1, [CVI_MPY01]>]>,
- InstrItinData<CVI_VP, [InstrStage<1, [SLOT0,SLOT1,SLOT2,SLOT3], 0>,
- InstrStage<1, [CVI_XLANE]>]>,
- InstrItinData<CVI_VP_LONG, [InstrStage<1, [SLOT0,SLOT1,SLOT2,SLOT3], 0>,
- InstrStage<1, [CVI_XLANE]>]>,
- InstrItinData<CVI_VP_VS_EARLY,
- [InstrStage<1, [SLOT0,SLOT1,SLOT2,SLOT3], 0>,
- InstrStage<1, [CVI_XLSHF]>]>,
- InstrItinData<CVI_VP_VS_LONG,
- [InstrStage<1, [SLOT0,SLOT1,SLOT2,SLOT3], 0>,
- InstrStage<1, [CVI_XLSHF]>]>,
- InstrItinData<CVI_VP_VS,
- [InstrStage<1, [SLOT0,SLOT1,SLOT2,SLOT3], 0>,
- InstrStage<1, [CVI_XLSHF]>]>,
- InstrItinData<CVI_VP_VS_LONG_EARLY,
- [InstrStage<1, [SLOT0,SLOT1,SLOT2,SLOT3], 0>,
- InstrStage<1, [CVI_XLSHF]>]>,
- InstrItinData<CVI_VP_DV , [InstrStage<1, [SLOT0,SLOT1,SLOT2,SLOT3], 0>,
- InstrStage<1, [CVI_XLSHF]>]>,
- InstrItinData<CVI_VS,
- [InstrStage<1, [SLOT0,SLOT1,SLOT2,SLOT3], 0>,
- InstrStage<1, [CVI_SHIFT]>]>,
- InstrItinData<CVI_VINLANESAT,
- [InstrStage<1, [SLOT0,SLOT1,SLOT2,SLOT3], 0>,
- InstrStage<1, [CVI_SHIFT]>]>,
- InstrItinData<CVI_VM_LD , [InstrStage<1, [SLOT0, SLOT1], 0>,
- InstrStage<1, [CVI_LD], 0>,
- InstrStage<1, [CVI_XLANE, CVI_SHIFT,
- CVI_MPY0, CVI_MPY1]>]>,
- InstrItinData<CVI_VM_TMP_LD,[InstrStage<1,[SLOT0, SLOT1], 0>,
- InstrStage<1, [CVI_LD]>]>,
- InstrItinData<CVI_VM_CUR_LD,[InstrStage<1,[SLOT0, SLOT1], 0>,
- InstrStage<1, [CVI_LD], 0>,
- InstrStage<1, [CVI_XLANE, CVI_SHIFT,
- CVI_MPY0, CVI_MPY1]>]>,
- InstrItinData<CVI_VM_VP_LDU,[InstrStage<1,[SLOT0], 0>,
- InstrStage<1, [SLOT1], 0>,
- InstrStage<1, [CVI_LD], 0>,
- InstrStage<1, [CVI_XLANE]>]>,
- InstrItinData<CVI_VM_ST , [InstrStage<1, [SLOT0], 0>,
- InstrStage<1, [CVI_ST], 0>,
- InstrStage<1, [CVI_XLANE, CVI_SHIFT,
- CVI_MPY0, CVI_MPY1]>]>,
- InstrItinData<CVI_VM_NEW_ST,[InstrStage<1,[SLOT0], 0>,
- InstrStage<1, [CVI_ST]>]>,
- InstrItinData<CVI_VM_STU , [InstrStage<1, [SLOT0], 0>,
- InstrStage<1, [SLOT1], 0>,
- InstrStage<1, [CVI_ST], 0>,
- InstrStage<1, [CVI_XLANE]>]>,
- InstrItinData<CVI_HIST , [InstrStage<1, [SLOT0,SLOT1,SLOT2,SLOT3], 0>,
- InstrStage<1, [CVI_ALL]>]>
- ]>;
+ CVI_LD, CVI_XLSHF, CVI_MPY01, CVI_ALL,
+ CVI_ALL_NOMEM],
+ [Hex_FWD, HVX_FWD], HexagonV60ItinList.ItinList>;
def HexagonModelV60 : SchedMachineModel {
// Max issue per cycle == bundle width.
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonScheduleV62.td b/contrib/llvm/lib/Target/Hexagon/HexagonScheduleV62.td
index 0758788a600b..a0a8595f185f 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonScheduleV62.td
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonScheduleV62.td
@@ -6,115 +6,23 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
+//
+// ScalarItin contains some old itineraries still used by a
+// handful of instructions. Hopefully, we will be able to get rid of them soon.
-// V62 follows the same schedule as V60 with following exceptions:
-// Following instructions are permissible on any slot on V62:
-// V4_J4_cmpeq_fp0_jump_nt
-// V4_J4_cmpeq_fp0_jump_t
-// V4_J4_cmpeq_fp1_jump_nt
-// V4_J4_cmpeq_fp1_jump_t
-// V4_J4_cmpeq_tp0_jump_nt
-// V4_J4_cmpeq_tp0_jump_t
-// V4_J4_cmpeq_tp1_jump_nt
-// V4_J4_cmpeq_tp1_jump_t
-// V4_J4_cmpeqi_fp0_jump_nt
-// V4_J4_cmpeqi_fp0_jump_t
-// V4_J4_cmpeqi_fp1_jump_nt
-// V4_J4_cmpeqi_fp1_jump_t
-// V4_J4_cmpeqi_tp0_jump_nt
-// V4_J4_cmpeqi_tp0_jump_t
-// V4_J4_cmpeqi_tp1_jump_nt
-// V4_J4_cmpeqi_tp1_jump_t
-// V4_J4_cmpeqn1_fp0_jump_nt
-// V4_J4_cmpeqn1_fp0_jump_t
-// V4_J4_cmpeqn1_fp1_jump_nt
-// V4_J4_cmpeqn1_fp1_jump_t
-// V4_J4_cmpeqn1_tp0_jump_nt
-// V4_J4_cmpeqn1_tp0_jump_t
-// V4_J4_cmpeqn1_tp1_jump_nt
-// V4_J4_cmpeqn1_tp1_jump_t
-// V4_J4_cmpgt_fp0_jump_nt
-// V4_J4_cmpgt_fp0_jump_t
-// V4_J4_cmpgt_fp1_jump_nt
-// V4_J4_cmpgt_fp1_jump_t
-// V4_J4_cmpgt_tp0_jump_nt
-// V4_J4_cmpgt_tp0_jump_t
-// V4_J4_cmpgt_tp1_jump_nt
-// V4_J4_cmpgt_tp1_jump_t
-// V4_J4_cmpgti_fp0_jump_nt
-// V4_J4_cmpgti_fp0_jump_t
-// V4_J4_cmpgti_fp1_jump_nt
-// V4_J4_cmpgti_fp1_jump_t
-// V4_J4_cmpgti_tp0_jump_nt
-// V4_J4_cmpgti_tp0_jump_t
-// V4_J4_cmpgti_tp1_jump_nt
-// V4_J4_cmpgti_tp1_jump_t
-// V4_J4_cmpgtn1_fp0_jump_nt
-// V4_J4_cmpgtn1_fp0_jump_t
-// V4_J4_cmpgtn1_fp1_jump_nt
-// V4_J4_cmpgtn1_fp1_jump_t
-// V4_J4_cmpgtn1_tp0_jump_nt
-// V4_J4_cmpgtn1_tp0_jump_t
-// V4_J4_cmpgtn1_tp1_jump_nt
-// V4_J4_cmpgtn1_tp1_jump_t
-// V4_J4_cmpgtu_fp0_jump_nt
-// V4_J4_cmpgtu_fp0_jump_t
-// V4_J4_cmpgtu_fp1_jump_nt
-// V4_J4_cmpgtu_fp1_jump_t
-// V4_J4_cmpgtu_tp0_jump_nt
-// V4_J4_cmpgtu_tp0_jump_t
-// V4_J4_cmpgtu_tp1_jump_nt
-// V4_J4_cmpgtu_tp1_jump_t
-// V4_J4_cmpgtui_fp0_jump_nt
-// V4_J4_cmpgtui_fp0_jump_t
-// V4_J4_cmpgtui_fp1_jump_nt
-// V4_J4_cmpgtui_fp1_jump_t
-// V4_J4_cmpgtui_tp0_jump_nt
-// V4_J4_cmpgtui_tp0_jump_t
-// V4_J4_cmpgtui_tp1_jump_nt
-// V4_J4_cmpgtui_tp1_jump_t
-// V4_J4_tstbit0_fp0_jump_nt
-// V4_J4_tstbit0_fp0_jump_t
-// V4_J4_tstbit0_fp1_jump_nt
-// V4_J4_tstbit0_fp1_jump_t
-// V4_J4_tstbit0_tp0_jump_nt
-// V4_J4_tstbit0_tp0_jump_t
-// V4_J4_tstbit0_tp1_jump_nt
-// V4_J4_tstbit0_tp1_jump_t
-// JMP
-// JMPEXT
-// JMPEXT_f
-// JMPEXT_fnew_nt
-// JMPEXT_fnew_t
-// JMPEXT_t
-// JMPEXT_tnew_nt
-// JMPEXT_tnew_t
-// JMPNOTEXT
-// JMPNOTEXT_f
-// JMPNOTEXT_fnew_nt
-// JMPNOTEXT_fnew_t
-// JMPNOTEXT_t
-// JMPNOTEXT_tnew_nt
-// JMPNOTEXT_tnew_t
-// JMP_f
-// JMP_fnew_nt
-// JMP_fnew_t
-// JMP_t
-// JMP_tnew_nt
-// JMP_tnew_t
-// RESTORE_DEALLOC_RET_JMP_V4
-// RESTORE_DEALLOC_RET_JMP_V4_EXT
-
-def HexagonV62ItinList : ScalarItin, HVXV62Itin {
+def HexagonV62ItinList : DepScalarItinV62, ScalarItin,
+ DepHVXItinV62, HVXItin, PseudoItin {
list<InstrItinData> ItinList =
- !listconcat(ScalarItin_list, HVXV62Itin_list);
+ !listconcat(DepScalarItinV62_list, ScalarItin_list,
+ DepHVXItinV62_list, HVXItin_list, PseudoItin_list);
}
def HexagonItinerariesV62 :
ProcessorItineraries<[SLOT0, SLOT1, SLOT2, SLOT3, SLOT_ENDLOOP,
CVI_ST, CVI_XLANE, CVI_SHIFT, CVI_MPY0, CVI_MPY1,
- CVI_LD, CVI_XLSHF, CVI_MPY01, CVI_ALL],
- [], HexagonV62ItinList.ItinList>;
+ CVI_LD, CVI_XLSHF, CVI_MPY01, CVI_ALL,
+ CVI_ALL_NOMEM],
+ [Hex_FWD, HVX_FWD], HexagonV62ItinList.ItinList>;
def HexagonModelV62 : SchedMachineModel {
// Max issue per cycle == bundle width.
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonSubtarget.cpp b/contrib/llvm/lib/Target/Hexagon/HexagonSubtarget.cpp
index 033b93fc910a..8851a23ae8ac 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonSubtarget.cpp
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonSubtarget.cpp
@@ -73,6 +73,10 @@ static cl::opt<bool> OverrideLongCalls("hexagon-long-calls",
cl::Hidden, cl::ZeroOrMore, cl::init(false),
cl::desc("If present, forces/disables the use of long calls"));
+static cl::opt<bool> EnablePredicatedCalls("hexagon-pred-calls",
+ cl::Hidden, cl::ZeroOrMore, cl::init(false),
+ cl::desc("Consider calls to be predicable"));
+
void HexagonSubtarget::initializeEnvironment() {
UseMemOps = false;
ModeIEEERndNear = false;
@@ -139,6 +143,59 @@ HexagonSubtarget::HexagonSubtarget(const Triple &TT, StringRef CPU,
UseBSBScheduling = hasV60TOps() && EnableBSBSched;
}
+/// \brief Perform target specific adjustments to the latency of a schedule
+/// dependency.
+void HexagonSubtarget::adjustSchedDependency(SUnit *Src, SUnit *Dst,
+ SDep &Dep) const {
+ MachineInstr *SrcInst = Src->getInstr();
+ MachineInstr *DstInst = Dst->getInstr();
+ if (!Src->isInstr() || !Dst->isInstr())
+ return;
+
+ const HexagonInstrInfo *QII = getInstrInfo();
+
+ // Instructions with .new operands have zero latency.
+ SmallSet<SUnit *, 4> ExclSrc;
+ SmallSet<SUnit *, 4> ExclDst;
+ if (QII->canExecuteInBundle(*SrcInst, *DstInst) &&
+ isBestZeroLatency(Src, Dst, QII, ExclSrc, ExclDst)) {
+ Dep.setLatency(0);
+ return;
+ }
+
+ if (!hasV60TOps())
+ return;
+
+ // If it's a REG_SEQUENCE, use its destination instruction to determine
+ // the correct latency.
+ if (DstInst->isRegSequence() && Dst->NumSuccs == 1) {
+ unsigned RSeqReg = DstInst->getOperand(0).getReg();
+ MachineInstr *RSeqDst = Dst->Succs[0].getSUnit()->getInstr();
+ unsigned UseIdx = -1;
+ for (unsigned OpNum = 0; OpNum < RSeqDst->getNumOperands(); OpNum++) {
+ const MachineOperand &MO = RSeqDst->getOperand(OpNum);
+ if (MO.isReg() && MO.getReg() && MO.isUse() && MO.getReg() == RSeqReg) {
+ UseIdx = OpNum;
+ break;
+ }
+ }
+ unsigned RSeqLatency = (InstrInfo.getOperandLatency(&InstrItins, *SrcInst,
+ 0, *RSeqDst, UseIdx));
+ Dep.setLatency(RSeqLatency);
+ }
+
+ // Try to schedule uses near definitions to generate .cur.
+ ExclSrc.clear();
+ ExclDst.clear();
+ if (EnableDotCurSched && QII->isToBeScheduledASAP(*SrcInst, *DstInst) &&
+ isBestZeroLatency(Src, Dst, QII, ExclSrc, ExclDst)) {
+ Dep.setLatency(0);
+ return;
+ }
+
+ updateLatency(*SrcInst, *DstInst, Dep);
+}
+
void HexagonSubtarget::HexagonDAGMutation::apply(ScheduleDAGInstrs *DAG) {
for (auto &SU : DAG->SUnits) {
@@ -154,19 +211,19 @@ void HexagonSubtarget::HexagonDAGMutation::apply(ScheduleDAGInstrs *DAG) {
for (auto &SU : DAG->SUnits) {
// Update the latency of chain edges between v60 vector load or store
- // instructions to be 1. These instructions cannot be scheduled in the
+ // instructions to be 1. These instruction cannot be scheduled in the
// same packet.
MachineInstr &MI1 = *SU.getInstr();
auto *QII = static_cast<const HexagonInstrInfo*>(DAG->TII);
bool IsStoreMI1 = MI1.mayStore();
bool IsLoadMI1 = MI1.mayLoad();
- if (!QII->isV60VectorInstruction(MI1) || !(IsStoreMI1 || IsLoadMI1))
+ if (!QII->isHVXVec(MI1) || !(IsStoreMI1 || IsLoadMI1))
continue;
for (auto &SI : SU.Succs) {
if (SI.getKind() != SDep::Order || SI.getLatency() != 0)
continue;
MachineInstr &MI2 = *SI.getSUnit()->getInstr();
- if (!QII->isV60VectorInstruction(MI2))
+ if (!QII->isHVXVec(MI2))
continue;
if ((IsStoreMI1 && MI2.mayStore()) || (IsLoadMI1 && MI2.mayLoad())) {
SI.setLatency(1);
@@ -204,69 +261,99 @@ bool HexagonSubtarget::enableMachineScheduler() const {
return true;
}
-bool HexagonSubtarget::enableSubRegLiveness() const {
- return EnableSubregLiveness;
+bool HexagonSubtarget::usePredicatedCalls() const {
+ return EnablePredicatedCalls;
}
-// This helper function is responsible for increasing the latency only.
void HexagonSubtarget::updateLatency(MachineInstr &SrcInst,
MachineInstr &DstInst, SDep &Dep) const {
+ if (Dep.isArtificial()) {
+ Dep.setLatency(1);
+ return;
+ }
+
if (!hasV60TOps())
return;
auto &QII = static_cast<const HexagonInstrInfo&>(*getInstrInfo());
- if (EnableVecFrwdSched && QII.addLatencyToSchedule(SrcInst, DstInst)) {
- // Vec frwd scheduling.
- Dep.setLatency(Dep.getLatency() + 1);
- } else if (useBSBScheduling() &&
- QII.isLateInstrFeedsEarlyInstr(SrcInst, DstInst)) {
- // BSB scheduling.
- Dep.setLatency(Dep.getLatency() + 1);
- } else if (EnableTCLatencySched) {
- // TClass latency scheduling.
- // Check if SrcInst produces in 2C an operand of DstInst taken in stage 2B.
- if (QII.isTC1(SrcInst) || QII.isTC2(SrcInst))
- if (!QII.isTC1(DstInst) && !QII.isTC2(DstInst))
- Dep.setLatency(Dep.getLatency() + 1);
- }
+ // BSB scheduling.
+ if (QII.isHVXVec(SrcInst) || useBSBScheduling())
+ Dep.setLatency((Dep.getLatency() + 1) >> 1);
}
-/// If the SUnit has a zero latency edge, return the other SUnit.
-static SUnit *getZeroLatency(SUnit *N, SmallVector<SDep, 4> &Deps) {
- for (auto &I : Deps)
- if (I.isAssignedRegDep() && I.getLatency() == 0 &&
- !I.getSUnit()->getInstr()->isPseudo())
- return I.getSUnit();
- return nullptr;
+void HexagonSubtarget::restoreLatency(SUnit *Src, SUnit *Dst) const {
+ MachineInstr *SrcI = Src->getInstr();
+ for (auto &I : Src->Succs) {
+ if (!I.isAssignedRegDep() || I.getSUnit() != Dst)
+ continue;
+ unsigned DepR = I.getReg();
+ int DefIdx = -1;
+ for (unsigned OpNum = 0; OpNum < SrcI->getNumOperands(); OpNum++) {
+ const MachineOperand &MO = SrcI->getOperand(OpNum);
+ if (MO.isReg() && MO.isDef() && MO.getReg() == DepR)
+ DefIdx = OpNum;
+ }
+ assert(DefIdx >= 0 && "Def Reg not found in Src MI");
+ MachineInstr *DstI = Dst->getInstr();
+ for (unsigned OpNum = 0; OpNum < DstI->getNumOperands(); OpNum++) {
+ const MachineOperand &MO = DstI->getOperand(OpNum);
+ if (MO.isReg() && MO.isUse() && MO.getReg() == DepR) {
+ int Latency = (InstrInfo.getOperandLatency(&InstrItins, *SrcI,
+ DefIdx, *DstI, OpNum));
+
+ // For some instructions (ex: COPY), we might end up with < 0 latency
+ // as they don't have any Itinerary class associated with them.
+ if (Latency <= 0)
+ Latency = 1;
+
+ I.setLatency(Latency);
+ updateLatency(*SrcI, *DstI, I);
+ }
+ }
+
+ // Update the latency of opposite edge too.
+ for (auto &J : Dst->Preds) {
+ if (J.getSUnit() != Src)
+ continue;
+ J.setLatency(I.getLatency());
+ }
+ }
}
/// Change the latency between the two SUnits.
-void HexagonSubtarget::changeLatency(SUnit *Src, SmallVector<SDep, 4> &Deps,
- SUnit *Dst, unsigned Lat) const {
- MachineInstr &SrcI = *Src->getInstr();
- for (auto &I : Deps) {
+void HexagonSubtarget::changeLatency(SUnit *Src, SUnit *Dst, unsigned Lat)
+ const {
+ for (auto &I : Src->Succs) {
if (I.getSUnit() != Dst)
continue;
+ SDep T = I;
I.setLatency(Lat);
- SUnit *UpdateDst = I.getSUnit();
- updateLatency(SrcI, *UpdateDst->getInstr(), I);
+
// Update the latency of opposite edge too.
- for (auto &PI : UpdateDst->Preds) {
- if (PI.getSUnit() != Src || !PI.isAssignedRegDep())
- continue;
- PI.setLatency(Lat);
- updateLatency(SrcI, *UpdateDst->getInstr(), PI);
- }
+ T.setSUnit(Src);
+ auto F = std::find(Dst->Preds.begin(), Dst->Preds.end(), T);
+ assert(F != Dst->Preds.end());
+ F->setLatency(I.getLatency());
}
}
+/// If the SUnit has a zero latency edge, return the other SUnit.
+static SUnit *getZeroLatency(SUnit *N, SmallVector<SDep, 4> &Deps) {
+ for (auto &I : Deps)
+ if (I.isAssignedRegDep() && I.getLatency() == 0 &&
+ !I.getSUnit()->getInstr()->isPseudo())
+ return I.getSUnit();
+ return nullptr;
+}
+
// Return true if these are the best two instructions to schedule
// together with a zero latency. Only one dependence should have a zero
// latency. If there are multiple choices, choose the best, and change
-// ther others, if needed.
+// the others, if needed.
bool HexagonSubtarget::isBestZeroLatency(SUnit *Src, SUnit *Dst,
- const HexagonInstrInfo *TII) const {
+ const HexagonInstrInfo *TII, SmallSet<SUnit*, 4> &ExclSrc,
+ SmallSet<SUnit*, 4> &ExclDst) const {
MachineInstr &SrcInst = *Src->getInstr();
MachineInstr &DstInst = *Dst->getInstr();
@@ -277,6 +364,16 @@ bool HexagonSubtarget::isBestZeroLatency(SUnit *Src, SUnit *Dst,
if (SrcInst.isPHI() || DstInst.isPHI())
return false;
+ if (!TII->isToBeScheduledASAP(SrcInst, DstInst) &&
+ !TII->canExecuteInBundle(SrcInst, DstInst))
+ return false;
+
+ // The architecture doesn't allow three dependent instructions in the same
+ // packet. So, if the destination has a zero latency successor, then it's
+ // not a candidate for a zero latency predecessor.
+ if (getZeroLatency(Dst, Dst->Succs) != nullptr)
+ return false;
+
// Check if the Dst instruction is the best candidate first.
SUnit *Best = nullptr;
SUnit *DstBest = nullptr;
@@ -290,98 +387,53 @@ bool HexagonSubtarget::isBestZeroLatency(SUnit *Src, SUnit *Dst,
if (Best != Dst)
return false;
- // The caller frequents adds the same dependence twice. If so, then
+ // The caller frequently adds the same dependence twice. If so, then
// return true for this case too.
- if (Src == SrcBest && Dst == DstBest)
+ if ((Src == SrcBest && Dst == DstBest ) ||
+ (SrcBest == nullptr && Dst == DstBest) ||
+ (Src == SrcBest && Dst == nullptr))
return true;
// Reassign the latency for the previous bests, which requires setting
// the dependence edge in both directions.
- if (SrcBest != nullptr)
- changeLatency(SrcBest, SrcBest->Succs, Dst, 1);
- if (DstBest != nullptr)
- changeLatency(Src, Src->Succs, DstBest, 1);
- // If there is an edge from SrcBest to DstBst, then try to change that
- // to 0 now.
- if (SrcBest && DstBest)
- changeLatency(SrcBest, SrcBest->Succs, DstBest, 0);
-
- return true;
-}
-
-// Update the latency of a Phi when the Phi bridges two instructions that
-// require a multi-cycle latency.
-void HexagonSubtarget::changePhiLatency(MachineInstr &SrcInst, SUnit *Dst,
- SDep &Dep) const {
- if (!SrcInst.isPHI() || Dst->NumPreds == 0 || Dep.getLatency() != 0)
- return;
-
- for (const SDep &PI : Dst->Preds) {
- if (PI.getLatency() != 0)
- continue;
- Dep.setLatency(2);
- break;
- }
-}
-
-/// \brief Perform target specific adjustments to the latency of a schedule
-/// dependency.
-void HexagonSubtarget::adjustSchedDependency(SUnit *Src, SUnit *Dst,
- SDep &Dep) const {
- MachineInstr *SrcInst = Src->getInstr();
- MachineInstr *DstInst = Dst->getInstr();
- if (!Src->isInstr() || !Dst->isInstr())
- return;
-
- const HexagonInstrInfo *QII = static_cast<const HexagonInstrInfo *>(getInstrInfo());
-
- // Instructions with .new operands have zero latency.
- if (QII->canExecuteInBundle(*SrcInst, *DstInst) &&
- isBestZeroLatency(Src, Dst, QII)) {
- Dep.setLatency(0);
- return;
+ if (SrcBest != nullptr) {
+ if (!hasV60TOps())
+ changeLatency(SrcBest, Dst, 1);
+ else
+ restoreLatency(SrcBest, Dst);
}
-
- if (!hasV60TOps())
- return;
-
- // Don't adjust the latency of post-increment part of the instruction.
- if (QII->isPostIncrement(*SrcInst) && Dep.isAssignedRegDep()) {
- if (SrcInst->mayStore())
- return;
- if (Dep.getReg() != SrcInst->getOperand(0).getReg())
- return;
- } else if (QII->isPostIncrement(*DstInst) && Dep.getKind() == SDep::Anti) {
- if (DstInst->mayStore())
- return;
- if (Dep.getReg() != DstInst->getOperand(0).getReg())
- return;
- } else if (QII->isPostIncrement(*DstInst) && DstInst->mayStore() &&
- Dep.isAssignedRegDep()) {
- MachineOperand &Op = DstInst->getOperand(DstInst->getNumOperands() - 1);
- if (Op.isReg() && Dep.getReg() != Op.getReg())
- return;
- }
-
- // Check if we need to change any the latency values when Phis are added.
- if (useBSBScheduling() && SrcInst->isPHI()) {
- changePhiLatency(*SrcInst, Dst, Dep);
- return;
+ if (DstBest != nullptr) {
+ if (!hasV60TOps())
+ changeLatency(Src, DstBest, 1);
+ else
+ restoreLatency(Src, DstBest);
}
- // If it's a REG_SEQUENCE, use its destination instruction to determine
- // the correct latency.
- if (DstInst->isRegSequence() && Dst->NumSuccs == 1)
- DstInst = Dst->Succs[0].getSUnit()->getInstr();
-
- // Try to schedule uses near definitions to generate .cur.
- if (EnableDotCurSched && QII->isToBeScheduledASAP(*SrcInst, *DstInst) &&
- isBestZeroLatency(Src, Dst, QII)) {
- Dep.setLatency(0);
- return;
+ // Attempt to find another opprotunity for zero latency in a different
+ // dependence.
+ if (SrcBest && DstBest)
+ // If there is an edge from SrcBest to DstBst, then try to change that
+ // to 0 now.
+ changeLatency(SrcBest, DstBest, 0);
+ else if (DstBest) {
+ // Check if the previous best destination instruction has a new zero
+ // latency dependence opportunity.
+ ExclSrc.insert(Src);
+ for (auto &I : DstBest->Preds)
+ if (ExclSrc.count(I.getSUnit()) == 0 &&
+ isBestZeroLatency(I.getSUnit(), DstBest, TII, ExclSrc, ExclDst))
+ changeLatency(I.getSUnit(), DstBest, 0);
+ } else if (SrcBest) {
+ // Check if previous best source instruction has a new zero latency
+ // dependence opportunity.
+ ExclDst.insert(Dst);
+ for (auto &I : SrcBest->Succs)
+ if (ExclDst.count(I.getSUnit()) == 0 &&
+ isBestZeroLatency(SrcBest, I.getSUnit(), TII, ExclSrc, ExclDst))
+ changeLatency(SrcBest, I.getSUnit(), 0);
}
- updateLatency(*SrcInst, *DstInst, Dep);
+ return true;
}
unsigned HexagonSubtarget::getL1CacheLineSize() const {
@@ -392,3 +444,7 @@ unsigned HexagonSubtarget::getL1PrefetchDistance() const {
return 32;
}
+bool HexagonSubtarget::enableSubRegLiveness() const {
+ return EnableSubregLiveness;
+}
+
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonSubtarget.h b/contrib/llvm/lib/Target/Hexagon/HexagonSubtarget.h
index 6a3e7f13be4c..4379efa79c9c 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonSubtarget.h
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonSubtarget.h
@@ -104,6 +104,7 @@ public:
bool useHVXDblOps() const { return UseHVXOps && UseHVXDblOps; }
bool useHVXSglOps() const { return UseHVXOps && !UseHVXDblOps; }
bool useLongCalls() const { return UseLongCalls; }
+ bool usePredicatedCalls() const;
bool useBSBScheduling() const { return UseBSBScheduling; }
bool enableMachineScheduler() const override;
@@ -146,11 +147,10 @@ private:
// Helper function responsible for increasing the latency only.
void updateLatency(MachineInstr &SrcInst, MachineInstr &DstInst, SDep &Dep)
const;
- void changeLatency(SUnit *Src, SmallVector<SDep, 4> &Deps, SUnit *Dst,
- unsigned Lat) const;
- bool isBestZeroLatency(SUnit *Src, SUnit *Dst, const HexagonInstrInfo *TII)
- const;
- void changePhiLatency(MachineInstr &SrcInst, SUnit *Dst, SDep &Dep) const;
+ void restoreLatency(SUnit *Src, SUnit *Dst) const;
+ void changeLatency(SUnit *Src, SUnit *Dst, unsigned Lat) const;
+ bool isBestZeroLatency(SUnit *Src, SUnit *Dst, const HexagonInstrInfo *TII,
+ SmallSet<SUnit*, 4> &ExclSrc, SmallSet<SUnit*, 4> &ExclDst) const;
};
} // end namespace llvm
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp b/contrib/llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp
index bf1dce67bd0a..c21b6e2515d3 100644
--- a/contrib/llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp
@@ -334,7 +334,7 @@ bool HexagonPacketizerList::isNewifiable(const MachineInstr &MI,
// Vector stores can be predicated, and can be new-value stores, but
// they cannot be predicated on a .new predicate value.
if (NewRC == &Hexagon::PredRegsRegClass)
- if (HII->isV60VectorInstruction(MI) && MI.mayStore())
+ if (HII->isHVXVec(MI) && MI.mayStore())
return false;
return HII->isCondInst(MI) || HII->isJumpR(MI) || MI.isReturn() ||
HII->mayBeNewStore(MI);
@@ -377,9 +377,9 @@ void HexagonPacketizerList::cleanUpDotCur() {
bool HexagonPacketizerList::canPromoteToDotCur(const MachineInstr &MI,
const SUnit *PacketSU, unsigned DepReg, MachineBasicBlock::iterator &MII,
const TargetRegisterClass *RC) {
- if (!HII->isV60VectorInstruction(MI))
+ if (!HII->isHVXVec(MI))
return false;
- if (!HII->isV60VectorInstruction(*MII))
+ if (!HII->isHVXVec(*MII))
return false;
// Already a dot new instruction.
@@ -1365,7 +1365,7 @@ bool HexagonPacketizerList::isLegalToPacketizeTogether(SUnit *SUI, SUnit *SUJ) {
// Data dpendence ok if we have load.cur.
if (DepType == SDep::Data && HII->isDotCurInst(J)) {
- if (HII->isV60VectorInstruction(I))
+ if (HII->isHVXVec(I))
continue;
}
@@ -1374,6 +1374,8 @@ bool HexagonPacketizerList::isLegalToPacketizeTogether(SUnit *SUI, SUnit *SUJ) {
if (canPromoteToDotNew(I, SUJ, DepReg, II, RC)) {
if (promoteToDotNew(I, DepType, II, RC)) {
PromotedToDotNew = true;
+ if (cannotCoexist(I, J))
+ FoundSequentialDependence = true;
continue;
}
}
@@ -1418,26 +1420,7 @@ bool HexagonPacketizerList::isLegalToPacketizeTogether(SUnit *SUI, SUnit *SUJ) {
DepType != SDep::Output)
continue;
- // Ignore output dependences due to superregs. We can write to two
- // different subregisters of R1:0 for instance in the same cycle.
-
- // If neither I nor J defines DepReg, then this is a superfluous output
- // dependence. The dependence must be of the form:
- // R0 = ...
- // R1 = ...
- // and there is an output dependence between the two instructions with
- // DepReg = D0.
- // We want to ignore these dependences. Ideally, the dependence
- // constructor should annotate such dependences. We can then avoid this
- // relatively expensive check.
- //
if (DepType == SDep::Output) {
- // DepReg is the register that's responsible for the dependence.
- unsigned DepReg = SUJ->Succs[i].getReg();
-
- // Check if I and J really defines DepReg.
- if (!I.definesRegister(DepReg) && !J.definesRegister(DepReg))
- continue;
FoundSequentialDependence = true;
break;
}
@@ -1553,10 +1536,9 @@ bool HexagonPacketizerList::isLegalToPruneDependencies(SUnit *SUI, SUnit *SUJ) {
MachineInstr &I = *SUI->getInstr();
MachineInstr &J = *SUJ->getInstr();
- if (cannotCoexist(I, J))
- return false;
+ bool Coexist = !cannotCoexist(I, J);
- if (!Dependence)
+ if (Coexist && !Dependence)
return true;
// Check if the instruction was promoted to a dot-new. If so, demote it
@@ -1659,21 +1641,6 @@ bool HexagonPacketizerList::shouldAddToPacket(const MachineInstr &MI) {
}
-// Return true when ConsMI uses a register defined by ProdMI.
-static bool isDependent(const MachineInstr &ProdMI,
- const MachineInstr &ConsMI) {
- if (!ProdMI.getOperand(0).isReg())
- return false;
- unsigned DstReg = ProdMI.getOperand(0).getReg();
-
- for (auto &Op : ConsMI.operands())
- if (Op.isReg() && Op.isUse() && Op.getReg() == DstReg)
- // The MIs depend on each other.
- return true;
-
- return false;
-}
-
// V60 forward scheduling.
bool HexagonPacketizerList::producesStall(const MachineInstr &I) {
// If the packet already stalls, then ignore the stall from a subsequent
@@ -1695,40 +1662,48 @@ bool HexagonPacketizerList::producesStall(const MachineInstr &I) {
return false;
}
- // Check for stall between two vector instructions.
- if (HII->isV60VectorInstruction(I)) {
- for (auto J : OldPacketMIs) {
- if (!HII->isV60VectorInstruction(*J))
- continue;
- if (isDependent(*J, I) && !HII->isVecUsableNextPacket(*J, I))
- return true;
- }
+ SUnit *SUI = MIToSUnit[const_cast<MachineInstr *>(&I)];
- return false;
- }
+ // Check if the latency is 0 between this instruction and any instruction
+ // in the current packet. If so, we disregard any potential stalls due to
+ // the instructions in the previous packet. Most of the instruction pairs
+ // that can go together in the same packet have 0 latency between them.
+ // Only exceptions are newValueJumps as they're generated much later and
+ // the latencies can't be changed at that point. Another is .cur
+ // instructions if its consumer has a 0 latency successor (such as .new).
+ // In this case, the latency between .cur and the consumer stays non-zero
+ // even though we can have both .cur and .new in the same packet. Changing
+ // the latency to 0 is not an option as it causes software pipeliner to
+ // not pipeline in some cases.
+
+ // For Example:
+ // {
+ // I1: v6.cur = vmem(r0++#1)
+ // I2: v7 = valign(v6,v4,r2)
+ // I3: vmem(r5++#1) = v7.new
+ // }
+ // Here I2 and I3 has 0 cycle latency, but I1 and I2 has 2.
- // Check for stall between two scalar instructions. First, check that
- // there is no definition of a use in the current packet, because it
- // may be a candidate for .new.
- for (auto J : CurrentPacketMIs)
- if (!HII->isV60VectorInstruction(*J) && isDependent(*J, I))
- return false;
+ for (auto J : CurrentPacketMIs) {
+ SUnit *SUJ = MIToSUnit[J];
+ for (auto &Pred : SUI->Preds)
+ if (Pred.getSUnit() == SUJ &&
+ (Pred.getLatency() == 0 || HII->isNewValueJump(I) ||
+ HII->isToBeScheduledASAP(*J, I)))
+ return false;
+ }
- // Check for stall between I and instructions in the previous packet.
- if (MF.getSubtarget<HexagonSubtarget>().useBSBScheduling()) {
- for (auto J : OldPacketMIs) {
- if (HII->isV60VectorInstruction(*J))
- continue;
- if (!HII->isLateInstrFeedsEarlyInstr(*J, I))
- continue;
- if (isDependent(*J, I) && !HII->canExecuteInBundle(*J, I))
+ // Check if the latency is greater than one between this instruction and any
+ // instruction in the previous packet.
+ for (auto J : OldPacketMIs) {
+ SUnit *SUJ = MIToSUnit[J];
+ for (auto &Pred : SUI->Preds)
+ if (Pred.getSUnit() == SUJ && Pred.getLatency() > 1)
return true;
- }
}
// Check if the latency is greater than one between this instruction and any
// instruction in the previous packet.
- SUnit *SUI = MIToSUnit[const_cast<MachineInstr *>(&I)];
for (auto J : OldPacketMIs) {
SUnit *SUJ = MIToSUnit[J];
for (auto &Pred : SUI->Preds)
@@ -1739,7 +1714,6 @@ bool HexagonPacketizerList::producesStall(const MachineInstr &I) {
return false;
}
-
//===----------------------------------------------------------------------===//
// Public Constructor Functions
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonBaseInfo.h b/contrib/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonBaseInfo.h
index adb546dc2140..d8009c5da08e 100644
--- a/contrib/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonBaseInfo.h
+++ b/contrib/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonBaseInfo.h
@@ -29,7 +29,7 @@ namespace llvm {
///
namespace HexagonII {
unsigned const TypeCVI_FIRST = TypeCVI_HIST;
- unsigned const TypeCVI_LAST = TypeCVI_VX_DV;
+ unsigned const TypeCVI_LAST = TypeCVI_VX_LATE;
enum SubTarget {
HasV4SubT = 0x3f,
diff --git a/contrib/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCCodeEmitter.cpp b/contrib/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCCodeEmitter.cpp
index dfb5f4cc8260..70410ff03a64 100644
--- a/contrib/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCCodeEmitter.cpp
+++ b/contrib/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCCodeEmitter.cpp
@@ -788,14 +788,6 @@ HexagonMCCodeEmitter::getMachineOpValue(MCInst const &MI, MCOperand const &MO,
if (HexagonMCInstrInfo::isSubInstruction(MI) ||
llvm::HexagonMCInstrInfo::getType(MCII, MI) == HexagonII::TypeCJ)
return HexagonMCInstrInfo::getDuplexRegisterNumbering(Reg);
- switch(MI.getOpcode()){
- case Hexagon::A2_tfrrcr:
- case Hexagon::A2_tfrcrr:
- if(Reg == Hexagon::M0)
- Reg = Hexagon::C6;
- if(Reg == Hexagon::M1)
- Reg = Hexagon::C7;
- }
return MCT.getRegisterInfo()->getEncodingValue(Reg);
}
diff --git a/contrib/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonShuffler.cpp b/contrib/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonShuffler.cpp
index a5afa1daeb9e..564d43b45cb8 100644
--- a/contrib/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonShuffler.cpp
+++ b/contrib/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonShuffler.cpp
@@ -102,12 +102,13 @@ void HexagonCVIResource::SetupTUL(TypeUnitsAndLanes *TUL, StringRef CPU) {
UnitsAndLanes(CVI_XLANE | CVI_SHIFT | CVI_MPY0 | CVI_MPY1, 1);
(*TUL)[HexagonII::TypeCVI_VA_DV] = UnitsAndLanes(CVI_XLANE | CVI_MPY0, 2);
(*TUL)[HexagonII::TypeCVI_VX] = UnitsAndLanes(CVI_MPY0 | CVI_MPY1, 1);
+ (*TUL)[HexagonII::TypeCVI_VX_LATE] = UnitsAndLanes(CVI_MPY0 | CVI_MPY1, 1);
(*TUL)[HexagonII::TypeCVI_VX_DV] = UnitsAndLanes(CVI_MPY0, 2);
(*TUL)[HexagonII::TypeCVI_VP] = UnitsAndLanes(CVI_XLANE, 1);
(*TUL)[HexagonII::TypeCVI_VP_VS] = UnitsAndLanes(CVI_XLANE, 2);
(*TUL)[HexagonII::TypeCVI_VS] = UnitsAndLanes(CVI_SHIFT, 1);
(*TUL)[HexagonII::TypeCVI_VINLANESAT] =
- (CPU == "hexagonv60" || CPU == "hexagonv61" || CPU == "hexagonv61v1")
+ (CPU == "hexagonv60")
? UnitsAndLanes(CVI_SHIFT, 1)
: UnitsAndLanes(CVI_XLANE | CVI_SHIFT | CVI_MPY0 | CVI_MPY1, 1);
(*TUL)[HexagonII::TypeCVI_VM_LD] =
@@ -291,10 +292,8 @@ bool HexagonShuffler::check() {
break;
case HexagonII::TypeNCJ:
++memory; // NV insns are memory-like.
- if (HexagonMCInstrInfo::getDesc(MCII, ID).isBranch()) {
- ++jumps, ++jump1;
- foundBranches.push_back(ISJ);
- }
+ ++jumps, ++jump1;
+ foundBranches.push_back(ISJ);
break;
case HexagonII::TypeV2LDST:
if (HexagonMCInstrInfo::getDesc(MCII, ID).mayLoad()) {
diff --git a/contrib/llvm/lib/Target/Hexagon/RDFLiveness.cpp b/contrib/llvm/lib/Target/Hexagon/RDFLiveness.cpp
index 726b7af73b0a..9d8a3881797b 100644
--- a/contrib/llvm/lib/Target/Hexagon/RDFLiveness.cpp
+++ b/contrib/llvm/lib/Target/Hexagon/RDFLiveness.cpp
@@ -497,26 +497,33 @@ void Liveness::computePhiInfo() {
// = R1:0 u6 Not reached by d1 (covered collectively
// by d3 and d5), but following reached
// defs and uses from d1 will lead here.
- auto InPhiDefs = [&PhiDefs] (NodeAddr<DefNode*> DA) -> bool {
- return PhiDefs.count(DA.Id);
- };
for (auto UI = RealUses.begin(), UE = RealUses.end(); UI != UE; ) {
// For each reached register UI->first, there is a set UI->second, of
// uses of it. For each such use, check if it is reached by this phi,
// i.e. check if the set of its reaching uses intersects the set of
// this phi's defs.
- NodeRefSet &Uses = UI->second;
- for (auto I = Uses.begin(), E = Uses.end(); I != E; ) {
- auto UA = DFG.addr<UseNode*>(I->first);
+ NodeRefSet Uses = UI->second;
+ UI->second.clear();
+ for (std::pair<NodeId,LaneBitmask> I : Uses) {
+ auto UA = DFG.addr<UseNode*>(I.first);
// Undef flag is checked above.
assert((UA.Addr->getFlags() & NodeAttrs::Undef) == 0);
- RegisterRef R(UI->first, I->second);
- NodeList RDs = getAllReachingDefs(R, UA);
- // If none of the reaching defs of R are from this phi, remove this
- // use of R.
- I = any_of(RDs, InPhiDefs) ? std::next(I) : Uses.erase(I);
+ RegisterRef R(UI->first, I.second);
+ // Calculate the exposed part of the reached use.
+ RegisterAggr Covered(PRI);
+ for (NodeAddr<DefNode*> DA : getAllReachingDefs(R, UA)) {
+ if (PhiDefs.count(DA.Id))
+ break;
+ Covered.insert(DA.Addr->getRegRef(DFG));
+ }
+ if (RegisterRef RC = Covered.clearIn(R)) {
+ // We are updating the map for register UI->first, so we need
+ // to map RC to be expressed in terms of that register.
+ RegisterRef S = PRI.mapTo(RC, UI->first);
+ UI->second.insert({I.first, S.Mask});
+ }
}
- UI = Uses.empty() ? RealUses.erase(UI) : std::next(UI);
+ UI = UI->second.empty() ? RealUses.erase(UI) : std::next(UI);
}
// If this phi reaches some "real" uses, add it to the queue for upward
@@ -626,7 +633,7 @@ void Liveness::computePhiInfo() {
const RegisterAggr &DRs = PhiDRs.at(P.first);
if (!DRs.hasAliasOf(R))
continue;
- R = DRs.intersectWith(R);
+ R = PRI.mapTo(DRs.intersectWith(R), T.first);
for (std::pair<NodeId,LaneBitmask> V : T.second) {
LaneBitmask M = R.Mask & V.second;
if (M.none())
diff --git a/contrib/llvm/lib/Target/Hexagon/RDFRegisters.cpp b/contrib/llvm/lib/Target/Hexagon/RDFRegisters.cpp
index 4224ded3418b..2aabf4ee1a38 100644
--- a/contrib/llvm/lib/Target/Hexagon/RDFRegisters.cpp
+++ b/contrib/llvm/lib/Target/Hexagon/RDFRegisters.cpp
@@ -212,6 +212,21 @@ bool PhysicalRegisterInfo::aliasMM(RegisterRef RM, RegisterRef RN) const {
return false;
}
+RegisterRef PhysicalRegisterInfo::mapTo(RegisterRef RR, unsigned R) const {
+ if (RR.Reg == R)
+ return RR;
+ if (unsigned Idx = TRI.getSubRegIndex(R, RR.Reg))
+ return RegisterRef(R, TRI.composeSubRegIndexLaneMask(Idx, RR.Mask));
+ if (unsigned Idx = TRI.getSubRegIndex(RR.Reg, R)) {
+ const RegInfo &RI = RegInfos[R];
+ LaneBitmask RCM = RI.RegClass ? RI.RegClass->LaneMask
+ : LaneBitmask::getAll();
+ LaneBitmask M = TRI.reverseComposeSubRegIndexLaneMask(Idx, RR.Mask);
+ return RegisterRef(R, M & RCM);
+ }
+ llvm_unreachable("Invalid arguments: unrelated registers?");
+}
+
bool RegisterAggr::hasAliasOf(RegisterRef RR) const {
if (PhysicalRegisterInfo::isRegMaskId(RR.Reg))
diff --git a/contrib/llvm/lib/Target/Hexagon/RDFRegisters.h b/contrib/llvm/lib/Target/Hexagon/RDFRegisters.h
index 314d8b5666d7..09b733ce616b 100644
--- a/contrib/llvm/lib/Target/Hexagon/RDFRegisters.h
+++ b/contrib/llvm/lib/Target/Hexagon/RDFRegisters.h
@@ -112,6 +112,7 @@ namespace rdf {
const BitVector &getMaskUnits(RegisterId MaskId) const {
return MaskInfos[TargetRegisterInfo::stackSlot2Index(MaskId)].Units;
}
+ RegisterRef mapTo(RegisterRef RR, unsigned R) const;
const TargetRegisterInfo &getTRI() const { return TRI; }
diff --git a/contrib/llvm/lib/Target/Mips/MipsAsmPrinter.cpp b/contrib/llvm/lib/Target/Mips/MipsAsmPrinter.cpp
index 134f7ac3aea3..9cdbf510737f 100644
--- a/contrib/llvm/lib/Target/Mips/MipsAsmPrinter.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsAsmPrinter.cpp
@@ -81,7 +81,7 @@ bool MipsAsmPrinter::runOnMachineFunction(MachineFunction &MF) {
AsmPrinter::runOnMachineFunction(MF);
- EmitXRayTable();
+ emitXRayTable();
return true;
}
@@ -1148,39 +1148,6 @@ void MipsAsmPrinter::EmitSled(const MachineInstr &MI, SledKind Kind) {
recordSled(CurSled, MI, Kind);
}
-void MipsAsmPrinter::EmitXRayTable() {
- if (Sleds.empty())
- return;
- if (Subtarget->isTargetELF()) {
- auto PrevSection = OutStreamer->getCurrentSectionOnly();
- auto Fn = MF->getFunction();
- MCSection *Section;
-
- if (Fn->hasComdat())
- Section = OutContext.getELFSection("xray_instr_map", ELF::SHT_PROGBITS,
- ELF::SHF_ALLOC | ELF::SHF_GROUP, 0,
- Fn->getComdat()->getName());
- else
- Section =
- OutContext.getELFSection("xray_instr_map", ELF::SHT_PROGBITS,
- ELF::SHF_ALLOC, 0, CurrentFnSym->getName());
-
- OutStreamer->SwitchSection(Section);
- for (const auto &Sled : Sleds) {
- OutStreamer->EmitSymbolValue(Sled.Sled, Subtarget->isGP64bit() ? 8 : 4);
- OutStreamer->EmitSymbolValue(CurrentFnSym, Subtarget->isGP64bit() ? 8 : 4);
- auto Kind = static_cast<uint8_t>(Sled.Kind);
- OutStreamer->EmitBytes(
- StringRef(reinterpret_cast<const char *>(&Kind), 1));
- OutStreamer->EmitBytes(
- StringRef(reinterpret_cast<const char *>(&Sled.AlwaysInstrument), 1));
- OutStreamer->EmitZeros(Subtarget->isGP64bit() ? 14 : 6);
- }
- OutStreamer->SwitchSection(PrevSection);
- }
- Sleds.clear();
-}
-
void MipsAsmPrinter::LowerPATCHABLE_FUNCTION_ENTER(const MachineInstr &MI) {
EmitSled(MI, SledKind::FUNCTION_ENTER);
}
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/contrib/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
index 4d06912054a2..61fdda8aa109 100644
--- a/contrib/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -1,4661 +1,4662 @@
-//===-- NVPTXISelLowering.cpp - NVPTX DAG Lowering Implementation ---------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines the interfaces that NVPTX uses to lower LLVM code into a
-// selection DAG.
-//
-//===----------------------------------------------------------------------===//
-
-#include "MCTargetDesc/NVPTXBaseInfo.h"
-#include "NVPTX.h"
-#include "NVPTXISelLowering.h"
-#include "NVPTXSection.h"
-#include "NVPTXSubtarget.h"
-#include "NVPTXTargetMachine.h"
-#include "NVPTXTargetObjectFile.h"
-#include "NVPTXUtilities.h"
-#include "llvm/ADT/APInt.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/StringRef.h"
-#include "llvm/CodeGen/Analysis.h"
-#include "llvm/CodeGen/MachineFunction.h"
-#include "llvm/CodeGen/MachineMemOperand.h"
-#include "llvm/CodeGen/MachineValueType.h"
-#include "llvm/CodeGen/SelectionDAG.h"
-#include "llvm/CodeGen/SelectionDAGNodes.h"
-#include "llvm/CodeGen/ValueTypes.h"
-#include "llvm/IR/Argument.h"
-#include "llvm/IR/Attributes.h"
-#include "llvm/IR/CallSite.h"
-#include "llvm/IR/Constants.h"
-#include "llvm/IR/DataLayout.h"
-#include "llvm/IR/DerivedTypes.h"
-#include "llvm/IR/Function.h"
-#include "llvm/IR/GlobalValue.h"
-#include "llvm/IR/Instruction.h"
-#include "llvm/IR/Instructions.h"
-#include "llvm/IR/Module.h"
-#include "llvm/IR/Type.h"
-#include "llvm/IR/Value.h"
-#include "llvm/Support/Casting.h"
-#include "llvm/Support/CodeGen.h"
-#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/MathExtras.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetCallingConv.h"
-#include "llvm/Target/TargetLowering.h"
-#include "llvm/Target/TargetMachine.h"
-#include "llvm/Target/TargetOptions.h"
-#include <algorithm>
-#include <cassert>
-#include <cstdint>
-#include <iterator>
-#include <sstream>
-#include <string>
-#include <utility>
-#include <vector>
-
-#undef DEBUG_TYPE
-#define DEBUG_TYPE "nvptx-lower"
-
-using namespace llvm;
-
-static unsigned int uniqueCallSite = 0;
-
-static cl::opt<bool> sched4reg(
- "nvptx-sched4reg",
- cl::desc("NVPTX Specific: schedule for register pressue"), cl::init(false));
-
-static cl::opt<unsigned>
-FMAContractLevelOpt("nvptx-fma-level", cl::ZeroOrMore, cl::Hidden,
- cl::desc("NVPTX Specific: FMA contraction (0: don't do it"
- " 1: do it 2: do it aggressively"),
- cl::init(2));
-
-static cl::opt<int> UsePrecDivF32(
- "nvptx-prec-divf32", cl::ZeroOrMore, cl::Hidden,
- cl::desc("NVPTX Specifies: 0 use div.approx, 1 use div.full, 2 use"
- " IEEE Compliant F32 div.rnd if available."),
- cl::init(2));
-
-static cl::opt<bool> UsePrecSqrtF32(
- "nvptx-prec-sqrtf32", cl::Hidden,
- cl::desc("NVPTX Specific: 0 use sqrt.approx, 1 use sqrt.rn."),
- cl::init(true));
-
-static cl::opt<bool> FtzEnabled(
- "nvptx-f32ftz", cl::ZeroOrMore, cl::Hidden,
- cl::desc("NVPTX Specific: Flush f32 subnormals to sign-preserving zero."),
- cl::init(false));
-
-int NVPTXTargetLowering::getDivF32Level() const {
- if (UsePrecDivF32.getNumOccurrences() > 0) {
- // If nvptx-prec-div32=N is used on the command-line, always honor it
- return UsePrecDivF32;
- } else {
- // Otherwise, use div.approx if fast math is enabled
- if (getTargetMachine().Options.UnsafeFPMath)
- return 0;
- else
- return 2;
- }
-}
-
-bool NVPTXTargetLowering::usePrecSqrtF32() const {
- if (UsePrecSqrtF32.getNumOccurrences() > 0) {
- // If nvptx-prec-sqrtf32 is used on the command-line, always honor it
- return UsePrecSqrtF32;
- } else {
- // Otherwise, use sqrt.approx if fast math is enabled
- return !getTargetMachine().Options.UnsafeFPMath;
- }
-}
-
-bool NVPTXTargetLowering::useF32FTZ(const MachineFunction &MF) const {
- // TODO: Get rid of this flag; there can be only one way to do this.
- if (FtzEnabled.getNumOccurrences() > 0) {
- // If nvptx-f32ftz is used on the command-line, always honor it
- return FtzEnabled;
- } else {
- const Function *F = MF.getFunction();
- // Otherwise, check for an nvptx-f32ftz attribute on the function
- if (F->hasFnAttribute("nvptx-f32ftz"))
- return F->getFnAttribute("nvptx-f32ftz").getValueAsString() == "true";
- else
- return false;
- }
-}
-
-static bool IsPTXVectorType(MVT VT) {
- switch (VT.SimpleTy) {
- default:
- return false;
- case MVT::v2i1:
- case MVT::v4i1:
- case MVT::v2i8:
- case MVT::v4i8:
- case MVT::v2i16:
- case MVT::v4i16:
- case MVT::v2i32:
- case MVT::v4i32:
- case MVT::v2i64:
- case MVT::v2f16:
- case MVT::v4f16:
- case MVT::v8f16: // <4 x f16x2>
- case MVT::v2f32:
- case MVT::v4f32:
- case MVT::v2f64:
- return true;
- }
-}
-
-/// ComputePTXValueVTs - For the given Type \p Ty, returns the set of primitive
-/// EVTs that compose it. Unlike ComputeValueVTs, this will break apart vectors
-/// into their primitive components.
-/// NOTE: This is a band-aid for code that expects ComputeValueVTs to return the
-/// same number of types as the Ins/Outs arrays in LowerFormalArguments,
-/// LowerCall, and LowerReturn.
-static void ComputePTXValueVTs(const TargetLowering &TLI, const DataLayout &DL,
- Type *Ty, SmallVectorImpl<EVT> &ValueVTs,
- SmallVectorImpl<uint64_t> *Offsets = nullptr,
- uint64_t StartingOffset = 0) {
- SmallVector<EVT, 16> TempVTs;
- SmallVector<uint64_t, 16> TempOffsets;
-
- ComputeValueVTs(TLI, DL, Ty, TempVTs, &TempOffsets, StartingOffset);
- for (unsigned i = 0, e = TempVTs.size(); i != e; ++i) {
- EVT VT = TempVTs[i];
- uint64_t Off = TempOffsets[i];
- // Split vectors into individual elements, except for v2f16, which
- // we will pass as a single scalar.
- if (VT.isVector()) {
- unsigned NumElts = VT.getVectorNumElements();
- EVT EltVT = VT.getVectorElementType();
- // Vectors with an even number of f16 elements will be passed to
- // us as an array of v2f16 elements. We must match this so we
- // stay in sync with Ins/Outs.
- if (EltVT == MVT::f16 && NumElts % 2 == 0) {
- EltVT = MVT::v2f16;
- NumElts /= 2;
- }
- for (unsigned j = 0; j != NumElts; ++j) {
- ValueVTs.push_back(EltVT);
- if (Offsets)
- Offsets->push_back(Off + j * EltVT.getStoreSize());
- }
- } else {
- ValueVTs.push_back(VT);
- if (Offsets)
- Offsets->push_back(Off);
- }
- }
-}
-
-// Check whether we can merge loads/stores of some of the pieces of a
-// flattened function parameter or return value into a single vector
-// load/store.
-//
-// The flattened parameter is represented as a list of EVTs and
-// offsets, and the whole structure is aligned to ParamAlignment. This
-// function determines whether we can load/store pieces of the
-// parameter starting at index Idx using a single vectorized op of
-// size AccessSize. If so, it returns the number of param pieces
-// covered by the vector op. Otherwise, it returns 1.
-static unsigned CanMergeParamLoadStoresStartingAt(
- unsigned Idx, uint32_t AccessSize, const SmallVectorImpl<EVT> &ValueVTs,
- const SmallVectorImpl<uint64_t> &Offsets, unsigned ParamAlignment) {
- assert(isPowerOf2_32(AccessSize) && "must be a power of 2!");
-
- // Can't vectorize if param alignment is not sufficient.
- if (AccessSize > ParamAlignment)
- return 1;
- // Can't vectorize if offset is not aligned.
- if (Offsets[Idx] & (AccessSize - 1))
- return 1;
-
- EVT EltVT = ValueVTs[Idx];
- unsigned EltSize = EltVT.getStoreSize();
-
- // Element is too large to vectorize.
- if (EltSize >= AccessSize)
- return 1;
-
- unsigned NumElts = AccessSize / EltSize;
- // Can't vectorize if AccessBytes if not a multiple of EltSize.
- if (AccessSize != EltSize * NumElts)
- return 1;
-
- // We don't have enough elements to vectorize.
- if (Idx + NumElts > ValueVTs.size())
- return 1;
-
- // PTX ISA can only deal with 2- and 4-element vector ops.
- if (NumElts != 4 && NumElts != 2)
- return 1;
-
- for (unsigned j = Idx + 1; j < Idx + NumElts; ++j) {
- // Types do not match.
- if (ValueVTs[j] != EltVT)
- return 1;
-
- // Elements are not contiguous.
- if (Offsets[j] - Offsets[j - 1] != EltSize)
- return 1;
- }
- // OK. We can vectorize ValueVTs[i..i+NumElts)
- return NumElts;
-}
-
-// Flags for tracking per-element vectorization state of loads/stores
-// of a flattened function parameter or return value.
-enum ParamVectorizationFlags {
- PVF_INNER = 0x0, // Middle elements of a vector.
- PVF_FIRST = 0x1, // First element of the vector.
- PVF_LAST = 0x2, // Last element of the vector.
- // Scalar is effectively a 1-element vector.
- PVF_SCALAR = PVF_FIRST | PVF_LAST
-};
-
-// Computes whether and how we can vectorize the loads/stores of a
-// flattened function parameter or return value.
-//
-// The flattened parameter is represented as the list of ValueVTs and
-// Offsets, and is aligned to ParamAlignment bytes. We return a vector
-// of the same size as ValueVTs indicating how each piece should be
-// loaded/stored (i.e. as a scalar, or as part of a vector
-// load/store).
-static SmallVector<ParamVectorizationFlags, 16>
-VectorizePTXValueVTs(const SmallVectorImpl<EVT> &ValueVTs,
- const SmallVectorImpl<uint64_t> &Offsets,
- unsigned ParamAlignment) {
- // Set vector size to match ValueVTs and mark all elements as
- // scalars by default.
- SmallVector<ParamVectorizationFlags, 16> VectorInfo;
- VectorInfo.assign(ValueVTs.size(), PVF_SCALAR);
-
- // Check what we can vectorize using 128/64/32-bit accesses.
- for (int I = 0, E = ValueVTs.size(); I != E; ++I) {
- // Skip elements we've already processed.
- assert(VectorInfo[I] == PVF_SCALAR && "Unexpected vector info state.");
- for (unsigned AccessSize : {16, 8, 4, 2}) {
- unsigned NumElts = CanMergeParamLoadStoresStartingAt(
- I, AccessSize, ValueVTs, Offsets, ParamAlignment);
- // Mark vectorized elements.
- switch (NumElts) {
- default:
- llvm_unreachable("Unexpected return value");
- case 1:
- // Can't vectorize using this size, try next smaller size.
- continue;
- case 2:
- assert(I + 1 < E && "Not enough elements.");
- VectorInfo[I] = PVF_FIRST;
- VectorInfo[I + 1] = PVF_LAST;
- I += 1;
- break;
- case 4:
- assert(I + 3 < E && "Not enough elements.");
- VectorInfo[I] = PVF_FIRST;
- VectorInfo[I + 1] = PVF_INNER;
- VectorInfo[I + 2] = PVF_INNER;
- VectorInfo[I + 3] = PVF_LAST;
- I += 3;
- break;
- }
- // Break out of the inner loop because we've already succeeded
- // using largest possible AccessSize.
- break;
- }
- }
- return VectorInfo;
-}
-
-// NVPTXTargetLowering Constructor.
-NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM,
- const NVPTXSubtarget &STI)
- : TargetLowering(TM), nvTM(&TM), STI(STI) {
- // always lower memset, memcpy, and memmove intrinsics to load/store
- // instructions, rather
- // then generating calls to memset, mempcy or memmove.
- MaxStoresPerMemset = (unsigned) 0xFFFFFFFF;
- MaxStoresPerMemcpy = (unsigned) 0xFFFFFFFF;
- MaxStoresPerMemmove = (unsigned) 0xFFFFFFFF;
-
- setBooleanContents(ZeroOrNegativeOneBooleanContent);
- setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
-
- // Jump is Expensive. Don't create extra control flow for 'and', 'or'
- // condition branches.
- setJumpIsExpensive(true);
-
- // Wide divides are _very_ slow. Try to reduce the width of the divide if
- // possible.
- addBypassSlowDiv(64, 32);
-
- // By default, use the Source scheduling
- if (sched4reg)
- setSchedulingPreference(Sched::RegPressure);
- else
- setSchedulingPreference(Sched::Source);
-
- auto setFP16OperationAction = [&](unsigned Op, MVT VT, LegalizeAction Action,
- LegalizeAction NoF16Action) {
- setOperationAction(Op, VT, STI.allowFP16Math() ? Action : NoF16Action);
- };
-
- addRegisterClass(MVT::i1, &NVPTX::Int1RegsRegClass);
- addRegisterClass(MVT::i16, &NVPTX::Int16RegsRegClass);
- addRegisterClass(MVT::i32, &NVPTX::Int32RegsRegClass);
- addRegisterClass(MVT::i64, &NVPTX::Int64RegsRegClass);
- addRegisterClass(MVT::f32, &NVPTX::Float32RegsRegClass);
- addRegisterClass(MVT::f64, &NVPTX::Float64RegsRegClass);
- addRegisterClass(MVT::f16, &NVPTX::Float16RegsRegClass);
- addRegisterClass(MVT::v2f16, &NVPTX::Float16x2RegsRegClass);
-
- // Conversion to/from FP16/FP16x2 is always legal.
- setOperationAction(ISD::SINT_TO_FP, MVT::f16, Legal);
- setOperationAction(ISD::FP_TO_SINT, MVT::f16, Legal);
- setOperationAction(ISD::BUILD_VECTOR, MVT::v2f16, Custom);
- setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f16, Custom);
-
- setFP16OperationAction(ISD::SETCC, MVT::f16, Legal, Promote);
- setFP16OperationAction(ISD::SETCC, MVT::v2f16, Legal, Expand);
-
- // Operations not directly supported by NVPTX.
- setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);
- setOperationAction(ISD::SELECT_CC, MVT::v2f16, Expand);
- setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
- setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
- setOperationAction(ISD::SELECT_CC, MVT::i1, Expand);
- setOperationAction(ISD::SELECT_CC, MVT::i8, Expand);
- setOperationAction(ISD::SELECT_CC, MVT::i16, Expand);
- setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
- setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
- setOperationAction(ISD::BR_CC, MVT::f16, Expand);
- setOperationAction(ISD::BR_CC, MVT::v2f16, Expand);
- setOperationAction(ISD::BR_CC, MVT::f32, Expand);
- setOperationAction(ISD::BR_CC, MVT::f64, Expand);
- setOperationAction(ISD::BR_CC, MVT::i1, Expand);
- setOperationAction(ISD::BR_CC, MVT::i8, Expand);
- setOperationAction(ISD::BR_CC, MVT::i16, Expand);
- setOperationAction(ISD::BR_CC, MVT::i32, Expand);
- setOperationAction(ISD::BR_CC, MVT::i64, Expand);
- // Some SIGN_EXTEND_INREG can be done using cvt instruction.
- // For others we will expand to a SHL/SRA pair.
- setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i64, Legal);
- setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
- setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Legal);
- setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Legal);
- setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
-
- setOperationAction(ISD::SHL_PARTS, MVT::i32 , Custom);
- setOperationAction(ISD::SRA_PARTS, MVT::i32 , Custom);
- setOperationAction(ISD::SRL_PARTS, MVT::i32 , Custom);
- setOperationAction(ISD::SHL_PARTS, MVT::i64 , Custom);
- setOperationAction(ISD::SRA_PARTS, MVT::i64 , Custom);
- setOperationAction(ISD::SRL_PARTS, MVT::i64 , Custom);
-
- setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);
- setOperationAction(ISD::BITREVERSE, MVT::i64, Legal);
-
- if (STI.hasROT64()) {
- setOperationAction(ISD::ROTL, MVT::i64, Legal);
- setOperationAction(ISD::ROTR, MVT::i64, Legal);
- } else {
- setOperationAction(ISD::ROTL, MVT::i64, Expand);
- setOperationAction(ISD::ROTR, MVT::i64, Expand);
- }
- if (STI.hasROT32()) {
- setOperationAction(ISD::ROTL, MVT::i32, Legal);
- setOperationAction(ISD::ROTR, MVT::i32, Legal);
- } else {
- setOperationAction(ISD::ROTL, MVT::i32, Expand);
- setOperationAction(ISD::ROTR, MVT::i32, Expand);
- }
-
- setOperationAction(ISD::ROTL, MVT::i16, Expand);
- setOperationAction(ISD::ROTR, MVT::i16, Expand);
- setOperationAction(ISD::ROTL, MVT::i8, Expand);
- setOperationAction(ISD::ROTR, MVT::i8, Expand);
- setOperationAction(ISD::BSWAP, MVT::i16, Expand);
- setOperationAction(ISD::BSWAP, MVT::i32, Expand);
- setOperationAction(ISD::BSWAP, MVT::i64, Expand);
-
- // Indirect branch is not supported.
- // This also disables Jump Table creation.
- setOperationAction(ISD::BR_JT, MVT::Other, Expand);
- setOperationAction(ISD::BRIND, MVT::Other, Expand);
-
- setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
- setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
-
- // We want to legalize constant related memmove and memcopy
- // intrinsics.
- setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
-
- // Turn FP extload into load/fpextend
- setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
- setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
- setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
- setLoadExtAction(ISD::EXTLOAD, MVT::v2f32, MVT::v2f16, Expand);
- setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f16, Expand);
- setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f32, Expand);
- setLoadExtAction(ISD::EXTLOAD, MVT::v4f32, MVT::v4f16, Expand);
- setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f16, Expand);
- setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f32, Expand);
- // Turn FP truncstore into trunc + store.
- // FIXME: vector types should also be expanded
- setTruncStoreAction(MVT::f32, MVT::f16, Expand);
- setTruncStoreAction(MVT::f64, MVT::f16, Expand);
- setTruncStoreAction(MVT::f64, MVT::f32, Expand);
-
- // PTX does not support load / store predicate registers
- setOperationAction(ISD::LOAD, MVT::i1, Custom);
- setOperationAction(ISD::STORE, MVT::i1, Custom);
-
- for (MVT VT : MVT::integer_valuetypes()) {
- setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
- setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
- setTruncStoreAction(VT, MVT::i1, Expand);
- }
-
- // This is legal in NVPTX
- setOperationAction(ISD::ConstantFP, MVT::f64, Legal);
- setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
- setOperationAction(ISD::ConstantFP, MVT::f16, Legal);
-
- // TRAP can be lowered to PTX trap
- setOperationAction(ISD::TRAP, MVT::Other, Legal);
-
- setOperationAction(ISD::ADDC, MVT::i64, Expand);
- setOperationAction(ISD::ADDE, MVT::i64, Expand);
-
- // Register custom handling for vector loads/stores
- for (MVT VT : MVT::vector_valuetypes()) {
- if (IsPTXVectorType(VT)) {
- setOperationAction(ISD::LOAD, VT, Custom);
- setOperationAction(ISD::STORE, VT, Custom);
- setOperationAction(ISD::INTRINSIC_W_CHAIN, VT, Custom);
- }
- }
-
- // Custom handling for i8 intrinsics
- setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom);
-
- for (const auto& Ty : {MVT::i16, MVT::i32, MVT::i64}) {
- setOperationAction(ISD::SMIN, Ty, Legal);
- setOperationAction(ISD::SMAX, Ty, Legal);
- setOperationAction(ISD::UMIN, Ty, Legal);
- setOperationAction(ISD::UMAX, Ty, Legal);
-
- setOperationAction(ISD::CTPOP, Ty, Legal);
- setOperationAction(ISD::CTLZ, Ty, Legal);
- }
-
- setOperationAction(ISD::CTTZ, MVT::i16, Expand);
- setOperationAction(ISD::CTTZ, MVT::i32, Expand);
- setOperationAction(ISD::CTTZ, MVT::i64, Expand);
-
- // PTX does not directly support SELP of i1, so promote to i32 first
- setOperationAction(ISD::SELECT, MVT::i1, Custom);
-
- // PTX cannot multiply two i64s in a single instruction.
- setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
- setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
-
- // We have some custom DAG combine patterns for these nodes
- setTargetDAGCombine(ISD::ADD);
- setTargetDAGCombine(ISD::AND);
- setTargetDAGCombine(ISD::FADD);
- setTargetDAGCombine(ISD::MUL);
- setTargetDAGCombine(ISD::SHL);
- setTargetDAGCombine(ISD::SREM);
- setTargetDAGCombine(ISD::UREM);
-
- // setcc for f16x2 needs special handling to prevent legalizer's
- // attempt to scalarize it due to v2i1 not being legal.
- if (STI.allowFP16Math())
- setTargetDAGCombine(ISD::SETCC);
-
- // Promote fp16 arithmetic if fp16 hardware isn't available or the
- // user passed --nvptx-no-fp16-math. The flag is useful because,
- // although sm_53+ GPUs have some sort of FP16 support in
- // hardware, only sm_53 and sm_60 have full implementation. Others
- // only have token amount of hardware and are likely to run faster
- // by using fp32 units instead.
- for (const auto &Op : {ISD::FADD, ISD::FMUL, ISD::FSUB, ISD::FMA}) {
- setFP16OperationAction(Op, MVT::f16, Legal, Promote);
- setFP16OperationAction(Op, MVT::v2f16, Legal, Expand);
- }
-
- // There's no neg.f16 instruction. Expand to (0-x).
- setOperationAction(ISD::FNEG, MVT::f16, Expand);
- setOperationAction(ISD::FNEG, MVT::v2f16, Expand);
-
- // (would be) Library functions.
-
- // These map to conversion instructions for scalar FP types.
- for (const auto &Op : {ISD::FCEIL, ISD::FFLOOR, ISD::FNEARBYINT, ISD::FRINT,
- ISD::FROUND, ISD::FTRUNC}) {
- setOperationAction(Op, MVT::f16, Legal);
- setOperationAction(Op, MVT::f32, Legal);
- setOperationAction(Op, MVT::f64, Legal);
- setOperationAction(Op, MVT::v2f16, Expand);
- }
-
- // 'Expand' implements FCOPYSIGN without calling an external library.
- setOperationAction(ISD::FCOPYSIGN, MVT::f16, Expand);
- setOperationAction(ISD::FCOPYSIGN, MVT::v2f16, Expand);
- setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
- setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
-
- // These map to corresponding instructions for f32/f64. f16 must be
- // promoted to f32. v2f16 is expanded to f16, which is then promoted
- // to f32.
- for (const auto &Op : {ISD::FDIV, ISD::FREM, ISD::FSQRT, ISD::FSIN, ISD::FCOS,
- ISD::FABS, ISD::FMINNUM, ISD::FMAXNUM}) {
- setOperationAction(Op, MVT::f16, Promote);
- setOperationAction(Op, MVT::f32, Legal);
- setOperationAction(Op, MVT::f64, Legal);
- setOperationAction(Op, MVT::v2f16, Expand);
- }
- setOperationAction(ISD::FMINNUM, MVT::f16, Promote);
- setOperationAction(ISD::FMAXNUM, MVT::f16, Promote);
- setOperationAction(ISD::FMINNAN, MVT::f16, Promote);
- setOperationAction(ISD::FMAXNAN, MVT::f16, Promote);
-
- // No FEXP2, FLOG2. The PTX ex2 and log2 functions are always approximate.
- // No FPOW or FREM in PTX.
-
- // Now deduce the information based on the above mentioned
- // actions
- computeRegisterProperties(STI.getRegisterInfo());
-}
-
-const char *NVPTXTargetLowering::getTargetNodeName(unsigned Opcode) const {
- switch ((NVPTXISD::NodeType)Opcode) {
- case NVPTXISD::FIRST_NUMBER:
- break;
- case NVPTXISD::CALL:
- return "NVPTXISD::CALL";
- case NVPTXISD::RET_FLAG:
- return "NVPTXISD::RET_FLAG";
- case NVPTXISD::LOAD_PARAM:
- return "NVPTXISD::LOAD_PARAM";
- case NVPTXISD::Wrapper:
- return "NVPTXISD::Wrapper";
- case NVPTXISD::DeclareParam:
- return "NVPTXISD::DeclareParam";
- case NVPTXISD::DeclareScalarParam:
- return "NVPTXISD::DeclareScalarParam";
- case NVPTXISD::DeclareRet:
- return "NVPTXISD::DeclareRet";
- case NVPTXISD::DeclareScalarRet:
- return "NVPTXISD::DeclareScalarRet";
- case NVPTXISD::DeclareRetParam:
- return "NVPTXISD::DeclareRetParam";
- case NVPTXISD::PrintCall:
- return "NVPTXISD::PrintCall";
- case NVPTXISD::PrintConvergentCall:
- return "NVPTXISD::PrintConvergentCall";
- case NVPTXISD::PrintCallUni:
- return "NVPTXISD::PrintCallUni";
- case NVPTXISD::PrintConvergentCallUni:
- return "NVPTXISD::PrintConvergentCallUni";
- case NVPTXISD::LoadParam:
- return "NVPTXISD::LoadParam";
- case NVPTXISD::LoadParamV2:
- return "NVPTXISD::LoadParamV2";
- case NVPTXISD::LoadParamV4:
- return "NVPTXISD::LoadParamV4";
- case NVPTXISD::StoreParam:
- return "NVPTXISD::StoreParam";
- case NVPTXISD::StoreParamV2:
- return "NVPTXISD::StoreParamV2";
- case NVPTXISD::StoreParamV4:
- return "NVPTXISD::StoreParamV4";
- case NVPTXISD::StoreParamS32:
- return "NVPTXISD::StoreParamS32";
- case NVPTXISD::StoreParamU32:
- return "NVPTXISD::StoreParamU32";
- case NVPTXISD::CallArgBegin:
- return "NVPTXISD::CallArgBegin";
- case NVPTXISD::CallArg:
- return "NVPTXISD::CallArg";
- case NVPTXISD::LastCallArg:
- return "NVPTXISD::LastCallArg";
- case NVPTXISD::CallArgEnd:
- return "NVPTXISD::CallArgEnd";
- case NVPTXISD::CallVoid:
- return "NVPTXISD::CallVoid";
- case NVPTXISD::CallVal:
- return "NVPTXISD::CallVal";
- case NVPTXISD::CallSymbol:
- return "NVPTXISD::CallSymbol";
- case NVPTXISD::Prototype:
- return "NVPTXISD::Prototype";
- case NVPTXISD::MoveParam:
- return "NVPTXISD::MoveParam";
- case NVPTXISD::StoreRetval:
- return "NVPTXISD::StoreRetval";
- case NVPTXISD::StoreRetvalV2:
- return "NVPTXISD::StoreRetvalV2";
- case NVPTXISD::StoreRetvalV4:
- return "NVPTXISD::StoreRetvalV4";
- case NVPTXISD::PseudoUseParam:
- return "NVPTXISD::PseudoUseParam";
- case NVPTXISD::RETURN:
- return "NVPTXISD::RETURN";
- case NVPTXISD::CallSeqBegin:
- return "NVPTXISD::CallSeqBegin";
- case NVPTXISD::CallSeqEnd:
- return "NVPTXISD::CallSeqEnd";
- case NVPTXISD::CallPrototype:
- return "NVPTXISD::CallPrototype";
- case NVPTXISD::LoadV2:
- return "NVPTXISD::LoadV2";
- case NVPTXISD::LoadV4:
- return "NVPTXISD::LoadV4";
- case NVPTXISD::LDGV2:
- return "NVPTXISD::LDGV2";
- case NVPTXISD::LDGV4:
- return "NVPTXISD::LDGV4";
- case NVPTXISD::LDUV2:
- return "NVPTXISD::LDUV2";
- case NVPTXISD::LDUV4:
- return "NVPTXISD::LDUV4";
- case NVPTXISD::StoreV2:
- return "NVPTXISD::StoreV2";
- case NVPTXISD::StoreV4:
- return "NVPTXISD::StoreV4";
- case NVPTXISD::FUN_SHFL_CLAMP:
- return "NVPTXISD::FUN_SHFL_CLAMP";
- case NVPTXISD::FUN_SHFR_CLAMP:
- return "NVPTXISD::FUN_SHFR_CLAMP";
- case NVPTXISD::IMAD:
- return "NVPTXISD::IMAD";
- case NVPTXISD::SETP_F16X2:
- return "NVPTXISD::SETP_F16X2";
- case NVPTXISD::Dummy:
- return "NVPTXISD::Dummy";
- case NVPTXISD::MUL_WIDE_SIGNED:
- return "NVPTXISD::MUL_WIDE_SIGNED";
- case NVPTXISD::MUL_WIDE_UNSIGNED:
- return "NVPTXISD::MUL_WIDE_UNSIGNED";
- case NVPTXISD::Tex1DFloatS32: return "NVPTXISD::Tex1DFloatS32";
- case NVPTXISD::Tex1DFloatFloat: return "NVPTXISD::Tex1DFloatFloat";
- case NVPTXISD::Tex1DFloatFloatLevel:
- return "NVPTXISD::Tex1DFloatFloatLevel";
- case NVPTXISD::Tex1DFloatFloatGrad:
- return "NVPTXISD::Tex1DFloatFloatGrad";
- case NVPTXISD::Tex1DS32S32: return "NVPTXISD::Tex1DS32S32";
- case NVPTXISD::Tex1DS32Float: return "NVPTXISD::Tex1DS32Float";
- case NVPTXISD::Tex1DS32FloatLevel:
- return "NVPTXISD::Tex1DS32FloatLevel";
- case NVPTXISD::Tex1DS32FloatGrad:
- return "NVPTXISD::Tex1DS32FloatGrad";
- case NVPTXISD::Tex1DU32S32: return "NVPTXISD::Tex1DU32S32";
- case NVPTXISD::Tex1DU32Float: return "NVPTXISD::Tex1DU32Float";
- case NVPTXISD::Tex1DU32FloatLevel:
- return "NVPTXISD::Tex1DU32FloatLevel";
- case NVPTXISD::Tex1DU32FloatGrad:
- return "NVPTXISD::Tex1DU32FloatGrad";
- case NVPTXISD::Tex1DArrayFloatS32: return "NVPTXISD::Tex1DArrayFloatS32";
- case NVPTXISD::Tex1DArrayFloatFloat: return "NVPTXISD::Tex1DArrayFloatFloat";
- case NVPTXISD::Tex1DArrayFloatFloatLevel:
- return "NVPTXISD::Tex1DArrayFloatFloatLevel";
- case NVPTXISD::Tex1DArrayFloatFloatGrad:
- return "NVPTXISD::Tex1DArrayFloatFloatGrad";
- case NVPTXISD::Tex1DArrayS32S32: return "NVPTXISD::Tex1DArrayS32S32";
- case NVPTXISD::Tex1DArrayS32Float: return "NVPTXISD::Tex1DArrayS32Float";
- case NVPTXISD::Tex1DArrayS32FloatLevel:
- return "NVPTXISD::Tex1DArrayS32FloatLevel";
- case NVPTXISD::Tex1DArrayS32FloatGrad:
- return "NVPTXISD::Tex1DArrayS32FloatGrad";
- case NVPTXISD::Tex1DArrayU32S32: return "NVPTXISD::Tex1DArrayU32S32";
- case NVPTXISD::Tex1DArrayU32Float: return "NVPTXISD::Tex1DArrayU32Float";
- case NVPTXISD::Tex1DArrayU32FloatLevel:
- return "NVPTXISD::Tex1DArrayU32FloatLevel";
- case NVPTXISD::Tex1DArrayU32FloatGrad:
- return "NVPTXISD::Tex1DArrayU32FloatGrad";
- case NVPTXISD::Tex2DFloatS32: return "NVPTXISD::Tex2DFloatS32";
- case NVPTXISD::Tex2DFloatFloat: return "NVPTXISD::Tex2DFloatFloat";
- case NVPTXISD::Tex2DFloatFloatLevel:
- return "NVPTXISD::Tex2DFloatFloatLevel";
- case NVPTXISD::Tex2DFloatFloatGrad:
- return "NVPTXISD::Tex2DFloatFloatGrad";
- case NVPTXISD::Tex2DS32S32: return "NVPTXISD::Tex2DS32S32";
- case NVPTXISD::Tex2DS32Float: return "NVPTXISD::Tex2DS32Float";
- case NVPTXISD::Tex2DS32FloatLevel:
- return "NVPTXISD::Tex2DS32FloatLevel";
- case NVPTXISD::Tex2DS32FloatGrad:
- return "NVPTXISD::Tex2DS32FloatGrad";
- case NVPTXISD::Tex2DU32S32: return "NVPTXISD::Tex2DU32S32";
- case NVPTXISD::Tex2DU32Float: return "NVPTXISD::Tex2DU32Float";
- case NVPTXISD::Tex2DU32FloatLevel:
- return "NVPTXISD::Tex2DU32FloatLevel";
- case NVPTXISD::Tex2DU32FloatGrad:
- return "NVPTXISD::Tex2DU32FloatGrad";
- case NVPTXISD::Tex2DArrayFloatS32: return "NVPTXISD::Tex2DArrayFloatS32";
- case NVPTXISD::Tex2DArrayFloatFloat: return "NVPTXISD::Tex2DArrayFloatFloat";
- case NVPTXISD::Tex2DArrayFloatFloatLevel:
- return "NVPTXISD::Tex2DArrayFloatFloatLevel";
- case NVPTXISD::Tex2DArrayFloatFloatGrad:
- return "NVPTXISD::Tex2DArrayFloatFloatGrad";
- case NVPTXISD::Tex2DArrayS32S32: return "NVPTXISD::Tex2DArrayS32S32";
- case NVPTXISD::Tex2DArrayS32Float: return "NVPTXISD::Tex2DArrayS32Float";
- case NVPTXISD::Tex2DArrayS32FloatLevel:
- return "NVPTXISD::Tex2DArrayS32FloatLevel";
- case NVPTXISD::Tex2DArrayS32FloatGrad:
- return "NVPTXISD::Tex2DArrayS32FloatGrad";
- case NVPTXISD::Tex2DArrayU32S32: return "NVPTXISD::Tex2DArrayU32S32";
- case NVPTXISD::Tex2DArrayU32Float: return "NVPTXISD::Tex2DArrayU32Float";
- case NVPTXISD::Tex2DArrayU32FloatLevel:
- return "NVPTXISD::Tex2DArrayU32FloatLevel";
- case NVPTXISD::Tex2DArrayU32FloatGrad:
- return "NVPTXISD::Tex2DArrayU32FloatGrad";
- case NVPTXISD::Tex3DFloatS32: return "NVPTXISD::Tex3DFloatS32";
- case NVPTXISD::Tex3DFloatFloat: return "NVPTXISD::Tex3DFloatFloat";
- case NVPTXISD::Tex3DFloatFloatLevel:
- return "NVPTXISD::Tex3DFloatFloatLevel";
- case NVPTXISD::Tex3DFloatFloatGrad:
- return "NVPTXISD::Tex3DFloatFloatGrad";
- case NVPTXISD::Tex3DS32S32: return "NVPTXISD::Tex3DS32S32";
- case NVPTXISD::Tex3DS32Float: return "NVPTXISD::Tex3DS32Float";
- case NVPTXISD::Tex3DS32FloatLevel:
- return "NVPTXISD::Tex3DS32FloatLevel";
- case NVPTXISD::Tex3DS32FloatGrad:
- return "NVPTXISD::Tex3DS32FloatGrad";
- case NVPTXISD::Tex3DU32S32: return "NVPTXISD::Tex3DU32S32";
- case NVPTXISD::Tex3DU32Float: return "NVPTXISD::Tex3DU32Float";
- case NVPTXISD::Tex3DU32FloatLevel:
- return "NVPTXISD::Tex3DU32FloatLevel";
- case NVPTXISD::Tex3DU32FloatGrad:
- return "NVPTXISD::Tex3DU32FloatGrad";
- case NVPTXISD::TexCubeFloatFloat: return "NVPTXISD::TexCubeFloatFloat";
- case NVPTXISD::TexCubeFloatFloatLevel:
- return "NVPTXISD::TexCubeFloatFloatLevel";
- case NVPTXISD::TexCubeS32Float: return "NVPTXISD::TexCubeS32Float";
- case NVPTXISD::TexCubeS32FloatLevel:
- return "NVPTXISD::TexCubeS32FloatLevel";
- case NVPTXISD::TexCubeU32Float: return "NVPTXISD::TexCubeU32Float";
- case NVPTXISD::TexCubeU32FloatLevel:
- return "NVPTXISD::TexCubeU32FloatLevel";
- case NVPTXISD::TexCubeArrayFloatFloat:
- return "NVPTXISD::TexCubeArrayFloatFloat";
- case NVPTXISD::TexCubeArrayFloatFloatLevel:
- return "NVPTXISD::TexCubeArrayFloatFloatLevel";
- case NVPTXISD::TexCubeArrayS32Float:
- return "NVPTXISD::TexCubeArrayS32Float";
- case NVPTXISD::TexCubeArrayS32FloatLevel:
- return "NVPTXISD::TexCubeArrayS32FloatLevel";
- case NVPTXISD::TexCubeArrayU32Float:
- return "NVPTXISD::TexCubeArrayU32Float";
- case NVPTXISD::TexCubeArrayU32FloatLevel:
- return "NVPTXISD::TexCubeArrayU32FloatLevel";
- case NVPTXISD::Tld4R2DFloatFloat:
- return "NVPTXISD::Tld4R2DFloatFloat";
- case NVPTXISD::Tld4G2DFloatFloat:
- return "NVPTXISD::Tld4G2DFloatFloat";
- case NVPTXISD::Tld4B2DFloatFloat:
- return "NVPTXISD::Tld4B2DFloatFloat";
- case NVPTXISD::Tld4A2DFloatFloat:
- return "NVPTXISD::Tld4A2DFloatFloat";
- case NVPTXISD::Tld4R2DS64Float:
- return "NVPTXISD::Tld4R2DS64Float";
- case NVPTXISD::Tld4G2DS64Float:
- return "NVPTXISD::Tld4G2DS64Float";
- case NVPTXISD::Tld4B2DS64Float:
- return "NVPTXISD::Tld4B2DS64Float";
- case NVPTXISD::Tld4A2DS64Float:
- return "NVPTXISD::Tld4A2DS64Float";
- case NVPTXISD::Tld4R2DU64Float:
- return "NVPTXISD::Tld4R2DU64Float";
- case NVPTXISD::Tld4G2DU64Float:
- return "NVPTXISD::Tld4G2DU64Float";
- case NVPTXISD::Tld4B2DU64Float:
- return "NVPTXISD::Tld4B2DU64Float";
- case NVPTXISD::Tld4A2DU64Float:
- return "NVPTXISD::Tld4A2DU64Float";
-
- case NVPTXISD::TexUnified1DFloatS32:
- return "NVPTXISD::TexUnified1DFloatS32";
- case NVPTXISD::TexUnified1DFloatFloat:
- return "NVPTXISD::TexUnified1DFloatFloat";
- case NVPTXISD::TexUnified1DFloatFloatLevel:
- return "NVPTXISD::TexUnified1DFloatFloatLevel";
- case NVPTXISD::TexUnified1DFloatFloatGrad:
- return "NVPTXISD::TexUnified1DFloatFloatGrad";
- case NVPTXISD::TexUnified1DS32S32:
- return "NVPTXISD::TexUnified1DS32S32";
- case NVPTXISD::TexUnified1DS32Float:
- return "NVPTXISD::TexUnified1DS32Float";
- case NVPTXISD::TexUnified1DS32FloatLevel:
- return "NVPTXISD::TexUnified1DS32FloatLevel";
- case NVPTXISD::TexUnified1DS32FloatGrad:
- return "NVPTXISD::TexUnified1DS32FloatGrad";
- case NVPTXISD::TexUnified1DU32S32:
- return "NVPTXISD::TexUnified1DU32S32";
- case NVPTXISD::TexUnified1DU32Float:
- return "NVPTXISD::TexUnified1DU32Float";
- case NVPTXISD::TexUnified1DU32FloatLevel:
- return "NVPTXISD::TexUnified1DU32FloatLevel";
- case NVPTXISD::TexUnified1DU32FloatGrad:
- return "NVPTXISD::TexUnified1DU32FloatGrad";
- case NVPTXISD::TexUnified1DArrayFloatS32:
- return "NVPTXISD::TexUnified1DArrayFloatS32";
- case NVPTXISD::TexUnified1DArrayFloatFloat:
- return "NVPTXISD::TexUnified1DArrayFloatFloat";
- case NVPTXISD::TexUnified1DArrayFloatFloatLevel:
- return "NVPTXISD::TexUnified1DArrayFloatFloatLevel";
- case NVPTXISD::TexUnified1DArrayFloatFloatGrad:
- return "NVPTXISD::TexUnified1DArrayFloatFloatGrad";
- case NVPTXISD::TexUnified1DArrayS32S32:
- return "NVPTXISD::TexUnified1DArrayS32S32";
- case NVPTXISD::TexUnified1DArrayS32Float:
- return "NVPTXISD::TexUnified1DArrayS32Float";
- case NVPTXISD::TexUnified1DArrayS32FloatLevel:
- return "NVPTXISD::TexUnified1DArrayS32FloatLevel";
- case NVPTXISD::TexUnified1DArrayS32FloatGrad:
- return "NVPTXISD::TexUnified1DArrayS32FloatGrad";
- case NVPTXISD::TexUnified1DArrayU32S32:
- return "NVPTXISD::TexUnified1DArrayU32S32";
- case NVPTXISD::TexUnified1DArrayU32Float:
- return "NVPTXISD::TexUnified1DArrayU32Float";
- case NVPTXISD::TexUnified1DArrayU32FloatLevel:
- return "NVPTXISD::TexUnified1DArrayU32FloatLevel";
- case NVPTXISD::TexUnified1DArrayU32FloatGrad:
- return "NVPTXISD::TexUnified1DArrayU32FloatGrad";
- case NVPTXISD::TexUnified2DFloatS32:
- return "NVPTXISD::TexUnified2DFloatS32";
- case NVPTXISD::TexUnified2DFloatFloat:
- return "NVPTXISD::TexUnified2DFloatFloat";
- case NVPTXISD::TexUnified2DFloatFloatLevel:
- return "NVPTXISD::TexUnified2DFloatFloatLevel";
- case NVPTXISD::TexUnified2DFloatFloatGrad:
- return "NVPTXISD::TexUnified2DFloatFloatGrad";
- case NVPTXISD::TexUnified2DS32S32:
- return "NVPTXISD::TexUnified2DS32S32";
- case NVPTXISD::TexUnified2DS32Float:
- return "NVPTXISD::TexUnified2DS32Float";
- case NVPTXISD::TexUnified2DS32FloatLevel:
- return "NVPTXISD::TexUnified2DS32FloatLevel";
- case NVPTXISD::TexUnified2DS32FloatGrad:
- return "NVPTXISD::TexUnified2DS32FloatGrad";
- case NVPTXISD::TexUnified2DU32S32:
- return "NVPTXISD::TexUnified2DU32S32";
- case NVPTXISD::TexUnified2DU32Float:
- return "NVPTXISD::TexUnified2DU32Float";
- case NVPTXISD::TexUnified2DU32FloatLevel:
- return "NVPTXISD::TexUnified2DU32FloatLevel";
- case NVPTXISD::TexUnified2DU32FloatGrad:
- return "NVPTXISD::TexUnified2DU32FloatGrad";
- case NVPTXISD::TexUnified2DArrayFloatS32:
- return "NVPTXISD::TexUnified2DArrayFloatS32";
- case NVPTXISD::TexUnified2DArrayFloatFloat:
- return "NVPTXISD::TexUnified2DArrayFloatFloat";
- case NVPTXISD::TexUnified2DArrayFloatFloatLevel:
- return "NVPTXISD::TexUnified2DArrayFloatFloatLevel";
- case NVPTXISD::TexUnified2DArrayFloatFloatGrad:
- return "NVPTXISD::TexUnified2DArrayFloatFloatGrad";
- case NVPTXISD::TexUnified2DArrayS32S32:
- return "NVPTXISD::TexUnified2DArrayS32S32";
- case NVPTXISD::TexUnified2DArrayS32Float:
- return "NVPTXISD::TexUnified2DArrayS32Float";
- case NVPTXISD::TexUnified2DArrayS32FloatLevel:
- return "NVPTXISD::TexUnified2DArrayS32FloatLevel";
- case NVPTXISD::TexUnified2DArrayS32FloatGrad:
- return "NVPTXISD::TexUnified2DArrayS32FloatGrad";
- case NVPTXISD::TexUnified2DArrayU32S32:
- return "NVPTXISD::TexUnified2DArrayU32S32";
- case NVPTXISD::TexUnified2DArrayU32Float:
- return "NVPTXISD::TexUnified2DArrayU32Float";
- case NVPTXISD::TexUnified2DArrayU32FloatLevel:
- return "NVPTXISD::TexUnified2DArrayU32FloatLevel";
- case NVPTXISD::TexUnified2DArrayU32FloatGrad:
- return "NVPTXISD::TexUnified2DArrayU32FloatGrad";
- case NVPTXISD::TexUnified3DFloatS32:
- return "NVPTXISD::TexUnified3DFloatS32";
- case NVPTXISD::TexUnified3DFloatFloat:
- return "NVPTXISD::TexUnified3DFloatFloat";
- case NVPTXISD::TexUnified3DFloatFloatLevel:
- return "NVPTXISD::TexUnified3DFloatFloatLevel";
- case NVPTXISD::TexUnified3DFloatFloatGrad:
- return "NVPTXISD::TexUnified3DFloatFloatGrad";
- case NVPTXISD::TexUnified3DS32S32:
- return "NVPTXISD::TexUnified3DS32S32";
- case NVPTXISD::TexUnified3DS32Float:
- return "NVPTXISD::TexUnified3DS32Float";
- case NVPTXISD::TexUnified3DS32FloatLevel:
- return "NVPTXISD::TexUnified3DS32FloatLevel";
- case NVPTXISD::TexUnified3DS32FloatGrad:
- return "NVPTXISD::TexUnified3DS32FloatGrad";
- case NVPTXISD::TexUnified3DU32S32:
- return "NVPTXISD::TexUnified3DU32S32";
- case NVPTXISD::TexUnified3DU32Float:
- return "NVPTXISD::TexUnified3DU32Float";
- case NVPTXISD::TexUnified3DU32FloatLevel:
- return "NVPTXISD::TexUnified3DU32FloatLevel";
- case NVPTXISD::TexUnified3DU32FloatGrad:
- return "NVPTXISD::TexUnified3DU32FloatGrad";
- case NVPTXISD::TexUnifiedCubeFloatFloat:
- return "NVPTXISD::TexUnifiedCubeFloatFloat";
- case NVPTXISD::TexUnifiedCubeFloatFloatLevel:
- return "NVPTXISD::TexUnifiedCubeFloatFloatLevel";
- case NVPTXISD::TexUnifiedCubeS32Float:
- return "NVPTXISD::TexUnifiedCubeS32Float";
- case NVPTXISD::TexUnifiedCubeS32FloatLevel:
- return "NVPTXISD::TexUnifiedCubeS32FloatLevel";
- case NVPTXISD::TexUnifiedCubeU32Float:
- return "NVPTXISD::TexUnifiedCubeU32Float";
- case NVPTXISD::TexUnifiedCubeU32FloatLevel:
- return "NVPTXISD::TexUnifiedCubeU32FloatLevel";
- case NVPTXISD::TexUnifiedCubeArrayFloatFloat:
- return "NVPTXISD::TexUnifiedCubeArrayFloatFloat";
- case NVPTXISD::TexUnifiedCubeArrayFloatFloatLevel:
- return "NVPTXISD::TexUnifiedCubeArrayFloatFloatLevel";
- case NVPTXISD::TexUnifiedCubeArrayS32Float:
- return "NVPTXISD::TexUnifiedCubeArrayS32Float";
- case NVPTXISD::TexUnifiedCubeArrayS32FloatLevel:
- return "NVPTXISD::TexUnifiedCubeArrayS32FloatLevel";
- case NVPTXISD::TexUnifiedCubeArrayU32Float:
- return "NVPTXISD::TexUnifiedCubeArrayU32Float";
- case NVPTXISD::TexUnifiedCubeArrayU32FloatLevel:
- return "NVPTXISD::TexUnifiedCubeArrayU32FloatLevel";
- case NVPTXISD::Tld4UnifiedR2DFloatFloat:
- return "NVPTXISD::Tld4UnifiedR2DFloatFloat";
- case NVPTXISD::Tld4UnifiedG2DFloatFloat:
- return "NVPTXISD::Tld4UnifiedG2DFloatFloat";
- case NVPTXISD::Tld4UnifiedB2DFloatFloat:
- return "NVPTXISD::Tld4UnifiedB2DFloatFloat";
- case NVPTXISD::Tld4UnifiedA2DFloatFloat:
- return "NVPTXISD::Tld4UnifiedA2DFloatFloat";
- case NVPTXISD::Tld4UnifiedR2DS64Float:
- return "NVPTXISD::Tld4UnifiedR2DS64Float";
- case NVPTXISD::Tld4UnifiedG2DS64Float:
- return "NVPTXISD::Tld4UnifiedG2DS64Float";
- case NVPTXISD::Tld4UnifiedB2DS64Float:
- return "NVPTXISD::Tld4UnifiedB2DS64Float";
- case NVPTXISD::Tld4UnifiedA2DS64Float:
- return "NVPTXISD::Tld4UnifiedA2DS64Float";
- case NVPTXISD::Tld4UnifiedR2DU64Float:
- return "NVPTXISD::Tld4UnifiedR2DU64Float";
- case NVPTXISD::Tld4UnifiedG2DU64Float:
- return "NVPTXISD::Tld4UnifiedG2DU64Float";
- case NVPTXISD::Tld4UnifiedB2DU64Float:
- return "NVPTXISD::Tld4UnifiedB2DU64Float";
- case NVPTXISD::Tld4UnifiedA2DU64Float:
- return "NVPTXISD::Tld4UnifiedA2DU64Float";
-
- case NVPTXISD::Suld1DI8Clamp: return "NVPTXISD::Suld1DI8Clamp";
- case NVPTXISD::Suld1DI16Clamp: return "NVPTXISD::Suld1DI16Clamp";
- case NVPTXISD::Suld1DI32Clamp: return "NVPTXISD::Suld1DI32Clamp";
- case NVPTXISD::Suld1DI64Clamp: return "NVPTXISD::Suld1DI64Clamp";
- case NVPTXISD::Suld1DV2I8Clamp: return "NVPTXISD::Suld1DV2I8Clamp";
- case NVPTXISD::Suld1DV2I16Clamp: return "NVPTXISD::Suld1DV2I16Clamp";
- case NVPTXISD::Suld1DV2I32Clamp: return "NVPTXISD::Suld1DV2I32Clamp";
- case NVPTXISD::Suld1DV2I64Clamp: return "NVPTXISD::Suld1DV2I64Clamp";
- case NVPTXISD::Suld1DV4I8Clamp: return "NVPTXISD::Suld1DV4I8Clamp";
- case NVPTXISD::Suld1DV4I16Clamp: return "NVPTXISD::Suld1DV4I16Clamp";
- case NVPTXISD::Suld1DV4I32Clamp: return "NVPTXISD::Suld1DV4I32Clamp";
-
- case NVPTXISD::Suld1DArrayI8Clamp: return "NVPTXISD::Suld1DArrayI8Clamp";
- case NVPTXISD::Suld1DArrayI16Clamp: return "NVPTXISD::Suld1DArrayI16Clamp";
- case NVPTXISD::Suld1DArrayI32Clamp: return "NVPTXISD::Suld1DArrayI32Clamp";
- case NVPTXISD::Suld1DArrayI64Clamp: return "NVPTXISD::Suld1DArrayI64Clamp";
- case NVPTXISD::Suld1DArrayV2I8Clamp: return "NVPTXISD::Suld1DArrayV2I8Clamp";
- case NVPTXISD::Suld1DArrayV2I16Clamp:return "NVPTXISD::Suld1DArrayV2I16Clamp";
- case NVPTXISD::Suld1DArrayV2I32Clamp:return "NVPTXISD::Suld1DArrayV2I32Clamp";
- case NVPTXISD::Suld1DArrayV2I64Clamp:return "NVPTXISD::Suld1DArrayV2I64Clamp";
- case NVPTXISD::Suld1DArrayV4I8Clamp: return "NVPTXISD::Suld1DArrayV4I8Clamp";
- case NVPTXISD::Suld1DArrayV4I16Clamp:return "NVPTXISD::Suld1DArrayV4I16Clamp";
- case NVPTXISD::Suld1DArrayV4I32Clamp:return "NVPTXISD::Suld1DArrayV4I32Clamp";
-
- case NVPTXISD::Suld2DI8Clamp: return "NVPTXISD::Suld2DI8Clamp";
- case NVPTXISD::Suld2DI16Clamp: return "NVPTXISD::Suld2DI16Clamp";
- case NVPTXISD::Suld2DI32Clamp: return "NVPTXISD::Suld2DI32Clamp";
- case NVPTXISD::Suld2DI64Clamp: return "NVPTXISD::Suld2DI64Clamp";
- case NVPTXISD::Suld2DV2I8Clamp: return "NVPTXISD::Suld2DV2I8Clamp";
- case NVPTXISD::Suld2DV2I16Clamp: return "NVPTXISD::Suld2DV2I16Clamp";
- case NVPTXISD::Suld2DV2I32Clamp: return "NVPTXISD::Suld2DV2I32Clamp";
- case NVPTXISD::Suld2DV2I64Clamp: return "NVPTXISD::Suld2DV2I64Clamp";
- case NVPTXISD::Suld2DV4I8Clamp: return "NVPTXISD::Suld2DV4I8Clamp";
- case NVPTXISD::Suld2DV4I16Clamp: return "NVPTXISD::Suld2DV4I16Clamp";
- case NVPTXISD::Suld2DV4I32Clamp: return "NVPTXISD::Suld2DV4I32Clamp";
-
- case NVPTXISD::Suld2DArrayI8Clamp: return "NVPTXISD::Suld2DArrayI8Clamp";
- case NVPTXISD::Suld2DArrayI16Clamp: return "NVPTXISD::Suld2DArrayI16Clamp";
- case NVPTXISD::Suld2DArrayI32Clamp: return "NVPTXISD::Suld2DArrayI32Clamp";
- case NVPTXISD::Suld2DArrayI64Clamp: return "NVPTXISD::Suld2DArrayI64Clamp";
- case NVPTXISD::Suld2DArrayV2I8Clamp: return "NVPTXISD::Suld2DArrayV2I8Clamp";
- case NVPTXISD::Suld2DArrayV2I16Clamp:return "NVPTXISD::Suld2DArrayV2I16Clamp";
- case NVPTXISD::Suld2DArrayV2I32Clamp:return "NVPTXISD::Suld2DArrayV2I32Clamp";
- case NVPTXISD::Suld2DArrayV2I64Clamp:return "NVPTXISD::Suld2DArrayV2I64Clamp";
- case NVPTXISD::Suld2DArrayV4I8Clamp: return "NVPTXISD::Suld2DArrayV4I8Clamp";
- case NVPTXISD::Suld2DArrayV4I16Clamp:return "NVPTXISD::Suld2DArrayV4I16Clamp";
- case NVPTXISD::Suld2DArrayV4I32Clamp:return "NVPTXISD::Suld2DArrayV4I32Clamp";
-
- case NVPTXISD::Suld3DI8Clamp: return "NVPTXISD::Suld3DI8Clamp";
- case NVPTXISD::Suld3DI16Clamp: return "NVPTXISD::Suld3DI16Clamp";
- case NVPTXISD::Suld3DI32Clamp: return "NVPTXISD::Suld3DI32Clamp";
- case NVPTXISD::Suld3DI64Clamp: return "NVPTXISD::Suld3DI64Clamp";
- case NVPTXISD::Suld3DV2I8Clamp: return "NVPTXISD::Suld3DV2I8Clamp";
- case NVPTXISD::Suld3DV2I16Clamp: return "NVPTXISD::Suld3DV2I16Clamp";
- case NVPTXISD::Suld3DV2I32Clamp: return "NVPTXISD::Suld3DV2I32Clamp";
- case NVPTXISD::Suld3DV2I64Clamp: return "NVPTXISD::Suld3DV2I64Clamp";
- case NVPTXISD::Suld3DV4I8Clamp: return "NVPTXISD::Suld3DV4I8Clamp";
- case NVPTXISD::Suld3DV4I16Clamp: return "NVPTXISD::Suld3DV4I16Clamp";
- case NVPTXISD::Suld3DV4I32Clamp: return "NVPTXISD::Suld3DV4I32Clamp";
-
- case NVPTXISD::Suld1DI8Trap: return "NVPTXISD::Suld1DI8Trap";
- case NVPTXISD::Suld1DI16Trap: return "NVPTXISD::Suld1DI16Trap";
- case NVPTXISD::Suld1DI32Trap: return "NVPTXISD::Suld1DI32Trap";
- case NVPTXISD::Suld1DI64Trap: return "NVPTXISD::Suld1DI64Trap";
- case NVPTXISD::Suld1DV2I8Trap: return "NVPTXISD::Suld1DV2I8Trap";
- case NVPTXISD::Suld1DV2I16Trap: return "NVPTXISD::Suld1DV2I16Trap";
- case NVPTXISD::Suld1DV2I32Trap: return "NVPTXISD::Suld1DV2I32Trap";
- case NVPTXISD::Suld1DV2I64Trap: return "NVPTXISD::Suld1DV2I64Trap";
- case NVPTXISD::Suld1DV4I8Trap: return "NVPTXISD::Suld1DV4I8Trap";
- case NVPTXISD::Suld1DV4I16Trap: return "NVPTXISD::Suld1DV4I16Trap";
- case NVPTXISD::Suld1DV4I32Trap: return "NVPTXISD::Suld1DV4I32Trap";
-
- case NVPTXISD::Suld1DArrayI8Trap: return "NVPTXISD::Suld1DArrayI8Trap";
- case NVPTXISD::Suld1DArrayI16Trap: return "NVPTXISD::Suld1DArrayI16Trap";
- case NVPTXISD::Suld1DArrayI32Trap: return "NVPTXISD::Suld1DArrayI32Trap";
- case NVPTXISD::Suld1DArrayI64Trap: return "NVPTXISD::Suld1DArrayI64Trap";
- case NVPTXISD::Suld1DArrayV2I8Trap: return "NVPTXISD::Suld1DArrayV2I8Trap";
- case NVPTXISD::Suld1DArrayV2I16Trap: return "NVPTXISD::Suld1DArrayV2I16Trap";
- case NVPTXISD::Suld1DArrayV2I32Trap: return "NVPTXISD::Suld1DArrayV2I32Trap";
- case NVPTXISD::Suld1DArrayV2I64Trap: return "NVPTXISD::Suld1DArrayV2I64Trap";
- case NVPTXISD::Suld1DArrayV4I8Trap: return "NVPTXISD::Suld1DArrayV4I8Trap";
- case NVPTXISD::Suld1DArrayV4I16Trap: return "NVPTXISD::Suld1DArrayV4I16Trap";
- case NVPTXISD::Suld1DArrayV4I32Trap: return "NVPTXISD::Suld1DArrayV4I32Trap";
-
- case NVPTXISD::Suld2DI8Trap: return "NVPTXISD::Suld2DI8Trap";
- case NVPTXISD::Suld2DI16Trap: return "NVPTXISD::Suld2DI16Trap";
- case NVPTXISD::Suld2DI32Trap: return "NVPTXISD::Suld2DI32Trap";
- case NVPTXISD::Suld2DI64Trap: return "NVPTXISD::Suld2DI64Trap";
- case NVPTXISD::Suld2DV2I8Trap: return "NVPTXISD::Suld2DV2I8Trap";
- case NVPTXISD::Suld2DV2I16Trap: return "NVPTXISD::Suld2DV2I16Trap";
- case NVPTXISD::Suld2DV2I32Trap: return "NVPTXISD::Suld2DV2I32Trap";
- case NVPTXISD::Suld2DV2I64Trap: return "NVPTXISD::Suld2DV2I64Trap";
- case NVPTXISD::Suld2DV4I8Trap: return "NVPTXISD::Suld2DV4I8Trap";
- case NVPTXISD::Suld2DV4I16Trap: return "NVPTXISD::Suld2DV4I16Trap";
- case NVPTXISD::Suld2DV4I32Trap: return "NVPTXISD::Suld2DV4I32Trap";
-
- case NVPTXISD::Suld2DArrayI8Trap: return "NVPTXISD::Suld2DArrayI8Trap";
- case NVPTXISD::Suld2DArrayI16Trap: return "NVPTXISD::Suld2DArrayI16Trap";
- case NVPTXISD::Suld2DArrayI32Trap: return "NVPTXISD::Suld2DArrayI32Trap";
- case NVPTXISD::Suld2DArrayI64Trap: return "NVPTXISD::Suld2DArrayI64Trap";
- case NVPTXISD::Suld2DArrayV2I8Trap: return "NVPTXISD::Suld2DArrayV2I8Trap";
- case NVPTXISD::Suld2DArrayV2I16Trap: return "NVPTXISD::Suld2DArrayV2I16Trap";
- case NVPTXISD::Suld2DArrayV2I32Trap: return "NVPTXISD::Suld2DArrayV2I32Trap";
- case NVPTXISD::Suld2DArrayV2I64Trap: return "NVPTXISD::Suld2DArrayV2I64Trap";
- case NVPTXISD::Suld2DArrayV4I8Trap: return "NVPTXISD::Suld2DArrayV4I8Trap";
- case NVPTXISD::Suld2DArrayV4I16Trap: return "NVPTXISD::Suld2DArrayV4I16Trap";
- case NVPTXISD::Suld2DArrayV4I32Trap: return "NVPTXISD::Suld2DArrayV4I32Trap";
-
- case NVPTXISD::Suld3DI8Trap: return "NVPTXISD::Suld3DI8Trap";
- case NVPTXISD::Suld3DI16Trap: return "NVPTXISD::Suld3DI16Trap";
- case NVPTXISD::Suld3DI32Trap: return "NVPTXISD::Suld3DI32Trap";
- case NVPTXISD::Suld3DI64Trap: return "NVPTXISD::Suld3DI64Trap";
- case NVPTXISD::Suld3DV2I8Trap: return "NVPTXISD::Suld3DV2I8Trap";
- case NVPTXISD::Suld3DV2I16Trap: return "NVPTXISD::Suld3DV2I16Trap";
- case NVPTXISD::Suld3DV2I32Trap: return "NVPTXISD::Suld3DV2I32Trap";
- case NVPTXISD::Suld3DV2I64Trap: return "NVPTXISD::Suld3DV2I64Trap";
- case NVPTXISD::Suld3DV4I8Trap: return "NVPTXISD::Suld3DV4I8Trap";
- case NVPTXISD::Suld3DV4I16Trap: return "NVPTXISD::Suld3DV4I16Trap";
- case NVPTXISD::Suld3DV4I32Trap: return "NVPTXISD::Suld3DV4I32Trap";
-
- case NVPTXISD::Suld1DI8Zero: return "NVPTXISD::Suld1DI8Zero";
- case NVPTXISD::Suld1DI16Zero: return "NVPTXISD::Suld1DI16Zero";
- case NVPTXISD::Suld1DI32Zero: return "NVPTXISD::Suld1DI32Zero";
- case NVPTXISD::Suld1DI64Zero: return "NVPTXISD::Suld1DI64Zero";
- case NVPTXISD::Suld1DV2I8Zero: return "NVPTXISD::Suld1DV2I8Zero";
- case NVPTXISD::Suld1DV2I16Zero: return "NVPTXISD::Suld1DV2I16Zero";
- case NVPTXISD::Suld1DV2I32Zero: return "NVPTXISD::Suld1DV2I32Zero";
- case NVPTXISD::Suld1DV2I64Zero: return "NVPTXISD::Suld1DV2I64Zero";
- case NVPTXISD::Suld1DV4I8Zero: return "NVPTXISD::Suld1DV4I8Zero";
- case NVPTXISD::Suld1DV4I16Zero: return "NVPTXISD::Suld1DV4I16Zero";
- case NVPTXISD::Suld1DV4I32Zero: return "NVPTXISD::Suld1DV4I32Zero";
-
- case NVPTXISD::Suld1DArrayI8Zero: return "NVPTXISD::Suld1DArrayI8Zero";
- case NVPTXISD::Suld1DArrayI16Zero: return "NVPTXISD::Suld1DArrayI16Zero";
- case NVPTXISD::Suld1DArrayI32Zero: return "NVPTXISD::Suld1DArrayI32Zero";
- case NVPTXISD::Suld1DArrayI64Zero: return "NVPTXISD::Suld1DArrayI64Zero";
- case NVPTXISD::Suld1DArrayV2I8Zero: return "NVPTXISD::Suld1DArrayV2I8Zero";
- case NVPTXISD::Suld1DArrayV2I16Zero: return "NVPTXISD::Suld1DArrayV2I16Zero";
- case NVPTXISD::Suld1DArrayV2I32Zero: return "NVPTXISD::Suld1DArrayV2I32Zero";
- case NVPTXISD::Suld1DArrayV2I64Zero: return "NVPTXISD::Suld1DArrayV2I64Zero";
- case NVPTXISD::Suld1DArrayV4I8Zero: return "NVPTXISD::Suld1DArrayV4I8Zero";
- case NVPTXISD::Suld1DArrayV4I16Zero: return "NVPTXISD::Suld1DArrayV4I16Zero";
- case NVPTXISD::Suld1DArrayV4I32Zero: return "NVPTXISD::Suld1DArrayV4I32Zero";
-
- case NVPTXISD::Suld2DI8Zero: return "NVPTXISD::Suld2DI8Zero";
- case NVPTXISD::Suld2DI16Zero: return "NVPTXISD::Suld2DI16Zero";
- case NVPTXISD::Suld2DI32Zero: return "NVPTXISD::Suld2DI32Zero";
- case NVPTXISD::Suld2DI64Zero: return "NVPTXISD::Suld2DI64Zero";
- case NVPTXISD::Suld2DV2I8Zero: return "NVPTXISD::Suld2DV2I8Zero";
- case NVPTXISD::Suld2DV2I16Zero: return "NVPTXISD::Suld2DV2I16Zero";
- case NVPTXISD::Suld2DV2I32Zero: return "NVPTXISD::Suld2DV2I32Zero";
- case NVPTXISD::Suld2DV2I64Zero: return "NVPTXISD::Suld2DV2I64Zero";
- case NVPTXISD::Suld2DV4I8Zero: return "NVPTXISD::Suld2DV4I8Zero";
- case NVPTXISD::Suld2DV4I16Zero: return "NVPTXISD::Suld2DV4I16Zero";
- case NVPTXISD::Suld2DV4I32Zero: return "NVPTXISD::Suld2DV4I32Zero";
-
- case NVPTXISD::Suld2DArrayI8Zero: return "NVPTXISD::Suld2DArrayI8Zero";
- case NVPTXISD::Suld2DArrayI16Zero: return "NVPTXISD::Suld2DArrayI16Zero";
- case NVPTXISD::Suld2DArrayI32Zero: return "NVPTXISD::Suld2DArrayI32Zero";
- case NVPTXISD::Suld2DArrayI64Zero: return "NVPTXISD::Suld2DArrayI64Zero";
- case NVPTXISD::Suld2DArrayV2I8Zero: return "NVPTXISD::Suld2DArrayV2I8Zero";
- case NVPTXISD::Suld2DArrayV2I16Zero: return "NVPTXISD::Suld2DArrayV2I16Zero";
- case NVPTXISD::Suld2DArrayV2I32Zero: return "NVPTXISD::Suld2DArrayV2I32Zero";
- case NVPTXISD::Suld2DArrayV2I64Zero: return "NVPTXISD::Suld2DArrayV2I64Zero";
- case NVPTXISD::Suld2DArrayV4I8Zero: return "NVPTXISD::Suld2DArrayV4I8Zero";
- case NVPTXISD::Suld2DArrayV4I16Zero: return "NVPTXISD::Suld2DArrayV4I16Zero";
- case NVPTXISD::Suld2DArrayV4I32Zero: return "NVPTXISD::Suld2DArrayV4I32Zero";
-
- case NVPTXISD::Suld3DI8Zero: return "NVPTXISD::Suld3DI8Zero";
- case NVPTXISD::Suld3DI16Zero: return "NVPTXISD::Suld3DI16Zero";
- case NVPTXISD::Suld3DI32Zero: return "NVPTXISD::Suld3DI32Zero";
- case NVPTXISD::Suld3DI64Zero: return "NVPTXISD::Suld3DI64Zero";
- case NVPTXISD::Suld3DV2I8Zero: return "NVPTXISD::Suld3DV2I8Zero";
- case NVPTXISD::Suld3DV2I16Zero: return "NVPTXISD::Suld3DV2I16Zero";
- case NVPTXISD::Suld3DV2I32Zero: return "NVPTXISD::Suld3DV2I32Zero";
- case NVPTXISD::Suld3DV2I64Zero: return "NVPTXISD::Suld3DV2I64Zero";
- case NVPTXISD::Suld3DV4I8Zero: return "NVPTXISD::Suld3DV4I8Zero";
- case NVPTXISD::Suld3DV4I16Zero: return "NVPTXISD::Suld3DV4I16Zero";
- case NVPTXISD::Suld3DV4I32Zero: return "NVPTXISD::Suld3DV4I32Zero";
- }
- return nullptr;
-}
-
-TargetLoweringBase::LegalizeTypeAction
-NVPTXTargetLowering::getPreferredVectorAction(EVT VT) const {
- if (VT.getVectorNumElements() != 1 && VT.getScalarType() == MVT::i1)
- return TypeSplitVector;
- if (VT == MVT::v2f16)
- return TypeLegal;
- return TargetLoweringBase::getPreferredVectorAction(VT);
-}
-
-SDValue NVPTXTargetLowering::getSqrtEstimate(SDValue Operand, SelectionDAG &DAG,
- int Enabled, int &ExtraSteps,
- bool &UseOneConst,
- bool Reciprocal) const {
- if (!(Enabled == ReciprocalEstimate::Enabled ||
- (Enabled == ReciprocalEstimate::Unspecified && !usePrecSqrtF32())))
- return SDValue();
-
- if (ExtraSteps == ReciprocalEstimate::Unspecified)
- ExtraSteps = 0;
-
- SDLoc DL(Operand);
- EVT VT = Operand.getValueType();
- bool Ftz = useF32FTZ(DAG.getMachineFunction());
-
- auto MakeIntrinsicCall = [&](Intrinsic::ID IID) {
- return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
- DAG.getConstant(IID, DL, MVT::i32), Operand);
- };
-
- // The sqrt and rsqrt refinement processes assume we always start out with an
- // approximation of the rsqrt. Therefore, if we're going to do any refinement
- // (i.e. ExtraSteps > 0), we must return an rsqrt. But if we're *not* doing
- // any refinement, we must return a regular sqrt.
- if (Reciprocal || ExtraSteps > 0) {
- if (VT == MVT::f32)
- return MakeIntrinsicCall(Ftz ? Intrinsic::nvvm_rsqrt_approx_ftz_f
- : Intrinsic::nvvm_rsqrt_approx_f);
- else if (VT == MVT::f64)
- return MakeIntrinsicCall(Intrinsic::nvvm_rsqrt_approx_d);
- else
- return SDValue();
- } else {
- if (VT == MVT::f32)
- return MakeIntrinsicCall(Ftz ? Intrinsic::nvvm_sqrt_approx_ftz_f
- : Intrinsic::nvvm_sqrt_approx_f);
- else {
- // There's no sqrt.approx.f64 instruction, so we emit
- // reciprocal(rsqrt(x)). This is faster than
- // select(x == 0, 0, x * rsqrt(x)). (In fact, it's faster than plain
- // x * rsqrt(x).)
- return DAG.getNode(
- ISD::INTRINSIC_WO_CHAIN, DL, VT,
- DAG.getConstant(Intrinsic::nvvm_rcp_approx_ftz_d, DL, MVT::i32),
- MakeIntrinsicCall(Intrinsic::nvvm_rsqrt_approx_d));
- }
- }
-}
-
-SDValue
-NVPTXTargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const {
- SDLoc dl(Op);
- const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
- auto PtrVT = getPointerTy(DAG.getDataLayout());
- Op = DAG.getTargetGlobalAddress(GV, dl, PtrVT);
- return DAG.getNode(NVPTXISD::Wrapper, dl, PtrVT, Op);
-}
-
-std::string NVPTXTargetLowering::getPrototype(
- const DataLayout &DL, Type *retTy, const ArgListTy &Args,
- const SmallVectorImpl<ISD::OutputArg> &Outs, unsigned retAlignment,
- const ImmutableCallSite *CS) const {
- auto PtrVT = getPointerTy(DL);
-
- bool isABI = (STI.getSmVersion() >= 20);
- assert(isABI && "Non-ABI compilation is not supported");
- if (!isABI)
- return "";
-
- std::stringstream O;
- O << "prototype_" << uniqueCallSite << " : .callprototype ";
-
- if (retTy->getTypeID() == Type::VoidTyID) {
- O << "()";
- } else {
- O << "(";
- if (retTy->isFloatingPointTy() || retTy->isIntegerTy()) {
- unsigned size = 0;
- if (auto *ITy = dyn_cast<IntegerType>(retTy)) {
- size = ITy->getBitWidth();
- } else {
- assert(retTy->isFloatingPointTy() &&
- "Floating point type expected here");
- size = retTy->getPrimitiveSizeInBits();
- }
- // PTX ABI requires all scalar return values to be at least 32
- // bits in size. fp16 normally uses .b16 as its storage type in
- // PTX, so its size must be adjusted here, too.
- if (size < 32)
- size = 32;
-
- O << ".param .b" << size << " _";
- } else if (isa<PointerType>(retTy)) {
- O << ".param .b" << PtrVT.getSizeInBits() << " _";
- } else if (retTy->isAggregateType() || retTy->isVectorTy()) {
- auto &DL = CS->getCalledFunction()->getParent()->getDataLayout();
- O << ".param .align " << retAlignment << " .b8 _["
- << DL.getTypeAllocSize(retTy) << "]";
- } else {
- llvm_unreachable("Unknown return type");
- }
- O << ") ";
- }
- O << "_ (";
-
- bool first = true;
-
- unsigned OIdx = 0;
- for (unsigned i = 0, e = Args.size(); i != e; ++i, ++OIdx) {
- Type *Ty = Args[i].Ty;
- if (!first) {
- O << ", ";
- }
- first = false;
-
- if (!Outs[OIdx].Flags.isByVal()) {
- if (Ty->isAggregateType() || Ty->isVectorTy()) {
- unsigned align = 0;
- const CallInst *CallI = cast<CallInst>(CS->getInstruction());
- // +1 because index 0 is reserved for return type alignment
- if (!getAlign(*CallI, i + 1, align))
- align = DL.getABITypeAlignment(Ty);
- unsigned sz = DL.getTypeAllocSize(Ty);
- O << ".param .align " << align << " .b8 ";
- O << "_";
- O << "[" << sz << "]";
- // update the index for Outs
- SmallVector<EVT, 16> vtparts;
- ComputeValueVTs(*this, DL, Ty, vtparts);
- if (unsigned len = vtparts.size())
- OIdx += len - 1;
- continue;
- }
- // i8 types in IR will be i16 types in SDAG
- assert((getValueType(DL, Ty) == Outs[OIdx].VT ||
- (getValueType(DL, Ty) == MVT::i8 && Outs[OIdx].VT == MVT::i16)) &&
- "type mismatch between callee prototype and arguments");
- // scalar type
- unsigned sz = 0;
- if (isa<IntegerType>(Ty)) {
- sz = cast<IntegerType>(Ty)->getBitWidth();
- if (sz < 32)
- sz = 32;
- } else if (isa<PointerType>(Ty)) {
- sz = PtrVT.getSizeInBits();
- } else if (Ty->isHalfTy())
- // PTX ABI requires all scalar parameters to be at least 32
- // bits in size. fp16 normally uses .b16 as its storage type
- // in PTX, so its size must be adjusted here, too.
- sz = 32;
- else
- sz = Ty->getPrimitiveSizeInBits();
- O << ".param .b" << sz << " ";
- O << "_";
- continue;
- }
- auto *PTy = dyn_cast<PointerType>(Ty);
- assert(PTy && "Param with byval attribute should be a pointer type");
- Type *ETy = PTy->getElementType();
-
- unsigned align = Outs[OIdx].Flags.getByValAlign();
- unsigned sz = DL.getTypeAllocSize(ETy);
- O << ".param .align " << align << " .b8 ";
- O << "_";
- O << "[" << sz << "]";
- }
- O << ");";
- return O.str();
-}
-
-unsigned NVPTXTargetLowering::getArgumentAlignment(SDValue Callee,
- const ImmutableCallSite *CS,
- Type *Ty, unsigned Idx,
- const DataLayout &DL) const {
- if (!CS) {
- // CallSite is zero, fallback to ABI type alignment
- return DL.getABITypeAlignment(Ty);
- }
-
- unsigned Align = 0;
- const Value *DirectCallee = CS->getCalledFunction();
-
- if (!DirectCallee) {
- // We don't have a direct function symbol, but that may be because of
- // constant cast instructions in the call.
- const Instruction *CalleeI = CS->getInstruction();
- assert(CalleeI && "Call target is not a function or derived value?");
-
- // With bitcast'd call targets, the instruction will be the call
- if (isa<CallInst>(CalleeI)) {
- // Check if we have call alignment metadata
- if (getAlign(*cast<CallInst>(CalleeI), Idx, Align))
- return Align;
-
- const Value *CalleeV = cast<CallInst>(CalleeI)->getCalledValue();
- // Ignore any bitcast instructions
- while (isa<ConstantExpr>(CalleeV)) {
- const ConstantExpr *CE = cast<ConstantExpr>(CalleeV);
- if (!CE->isCast())
- break;
- // Look through the bitcast
- CalleeV = cast<ConstantExpr>(CalleeV)->getOperand(0);
- }
-
- // We have now looked past all of the bitcasts. Do we finally have a
- // Function?
- if (isa<Function>(CalleeV))
- DirectCallee = CalleeV;
- }
- }
-
- // Check for function alignment information if we found that the
- // ultimate target is a Function
- if (DirectCallee)
- if (getAlign(*cast<Function>(DirectCallee), Idx, Align))
- return Align;
-
- // Call is indirect or alignment information is not available, fall back to
- // the ABI type alignment
- return DL.getABITypeAlignment(Ty);
-}
-
-SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
- SmallVectorImpl<SDValue> &InVals) const {
- SelectionDAG &DAG = CLI.DAG;
- SDLoc dl = CLI.DL;
- SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
- SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
- SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
- SDValue Chain = CLI.Chain;
- SDValue Callee = CLI.Callee;
- bool &isTailCall = CLI.IsTailCall;
- ArgListTy &Args = CLI.getArgs();
- Type *RetTy = CLI.RetTy;
- ImmutableCallSite *CS = CLI.CS;
- const DataLayout &DL = DAG.getDataLayout();
-
- bool isABI = (STI.getSmVersion() >= 20);
- assert(isABI && "Non-ABI compilation is not supported");
- if (!isABI)
- return Chain;
-
- SDValue tempChain = Chain;
- Chain = DAG.getCALLSEQ_START(
- Chain, DAG.getIntPtrConstant(uniqueCallSite, dl, true), dl);
- SDValue InFlag = Chain.getValue(1);
-
- unsigned paramCount = 0;
- // Args.size() and Outs.size() need not match.
- // Outs.size() will be larger
- // * if there is an aggregate argument with multiple fields (each field
- // showing up separately in Outs)
- // * if there is a vector argument with more than typical vector-length
- // elements (generally if more than 4) where each vector element is
- // individually present in Outs.
- // So a different index should be used for indexing into Outs/OutVals.
- // See similar issue in LowerFormalArguments.
- unsigned OIdx = 0;
- // Declare the .params or .reg need to pass values
- // to the function
- for (unsigned i = 0, e = Args.size(); i != e; ++i, ++OIdx) {
- EVT VT = Outs[OIdx].VT;
- Type *Ty = Args[i].Ty;
-
- if (!Outs[OIdx].Flags.isByVal()) {
- SmallVector<EVT, 16> VTs;
- SmallVector<uint64_t, 16> Offsets;
- ComputePTXValueVTs(*this, DL, Ty, VTs, &Offsets);
- unsigned ArgAlign =
- getArgumentAlignment(Callee, CS, Ty, paramCount + 1, DL);
- unsigned AllocSize = DL.getTypeAllocSize(Ty);
- SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
- bool NeedAlign; // Does argument declaration specify alignment?
- if (Ty->isAggregateType() || Ty->isVectorTy()) {
- // declare .param .align <align> .b8 .param<n>[<size>];
- SDValue DeclareParamOps[] = {
- Chain, DAG.getConstant(ArgAlign, dl, MVT::i32),
- DAG.getConstant(paramCount, dl, MVT::i32),
- DAG.getConstant(AllocSize, dl, MVT::i32), InFlag};
- Chain = DAG.getNode(NVPTXISD::DeclareParam, dl, DeclareParamVTs,
- DeclareParamOps);
- NeedAlign = true;
- } else {
- // declare .param .b<size> .param<n>;
- if ((VT.isInteger() || VT.isFloatingPoint()) && AllocSize < 4) {
- // PTX ABI requires integral types to be at least 32 bits in
- // size. FP16 is loaded/stored using i16, so it's handled
- // here as well.
- AllocSize = 4;
- }
- SDValue DeclareScalarParamOps[] = {
- Chain, DAG.getConstant(paramCount, dl, MVT::i32),
- DAG.getConstant(AllocSize * 8, dl, MVT::i32),
- DAG.getConstant(0, dl, MVT::i32), InFlag};
- Chain = DAG.getNode(NVPTXISD::DeclareScalarParam, dl, DeclareParamVTs,
- DeclareScalarParamOps);
- NeedAlign = false;
- }
- InFlag = Chain.getValue(1);
-
- // PTX Interoperability Guide 3.3(A): [Integer] Values shorter
- // than 32-bits are sign extended or zero extended, depending on
- // whether they are signed or unsigned types. This case applies
- // only to scalar parameters and not to aggregate values.
- bool ExtendIntegerParam =
- Ty->isIntegerTy() && DL.getTypeAllocSizeInBits(Ty) < 32;
-
- auto VectorInfo = VectorizePTXValueVTs(VTs, Offsets, ArgAlign);
- SmallVector<SDValue, 6> StoreOperands;
- for (unsigned j = 0, je = VTs.size(); j != je; ++j) {
- // New store.
- if (VectorInfo[j] & PVF_FIRST) {
- assert(StoreOperands.empty() && "Unfinished preceeding store.");
- StoreOperands.push_back(Chain);
- StoreOperands.push_back(DAG.getConstant(paramCount, dl, MVT::i32));
- StoreOperands.push_back(DAG.getConstant(Offsets[j], dl, MVT::i32));
- }
-
- EVT EltVT = VTs[j];
- SDValue StVal = OutVals[OIdx];
- if (ExtendIntegerParam) {
- assert(VTs.size() == 1 && "Scalar can't have multiple parts.");
- // zext/sext to i32
- StVal = DAG.getNode(Outs[OIdx].Flags.isSExt() ? ISD::SIGN_EXTEND
- : ISD::ZERO_EXTEND,
- dl, MVT::i32, StVal);
- } else if (EltVT.getSizeInBits() < 16) {
- // Use 16-bit registers for small stores as it's the
- // smallest general purpose register size supported by NVPTX.
- StVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, StVal);
- }
-
- // Record the value to store.
- StoreOperands.push_back(StVal);
-
- if (VectorInfo[j] & PVF_LAST) {
- unsigned NumElts = StoreOperands.size() - 3;
- NVPTXISD::NodeType Op;
- switch (NumElts) {
- case 1:
- Op = NVPTXISD::StoreParam;
- break;
- case 2:
- Op = NVPTXISD::StoreParamV2;
- break;
- case 4:
- Op = NVPTXISD::StoreParamV4;
- break;
- default:
- llvm_unreachable("Invalid vector info.");
- }
-
- StoreOperands.push_back(InFlag);
-
- // Adjust type of the store op if we've extended the scalar
- // return value.
- EVT TheStoreType = ExtendIntegerParam ? MVT::i32 : VTs[j];
- unsigned EltAlign =
- NeedAlign ? GreatestCommonDivisor64(ArgAlign, Offsets[j]) : 0;
-
- Chain = DAG.getMemIntrinsicNode(
- Op, dl, DAG.getVTList(MVT::Other, MVT::Glue), StoreOperands,
- TheStoreType, MachinePointerInfo(), EltAlign);
- InFlag = Chain.getValue(1);
-
- // Cleanup.
- StoreOperands.clear();
- }
- ++OIdx;
- }
- assert(StoreOperands.empty() && "Unfinished parameter store.");
- if (VTs.size() > 0)
- --OIdx;
- ++paramCount;
- continue;
- }
-
- // ByVal arguments
- SmallVector<EVT, 16> VTs;
- SmallVector<uint64_t, 16> Offsets;
- auto *PTy = dyn_cast<PointerType>(Args[i].Ty);
- assert(PTy && "Type of a byval parameter should be pointer");
- ComputePTXValueVTs(*this, DL, PTy->getElementType(), VTs, &Offsets, 0);
-
- // declare .param .align <align> .b8 .param<n>[<size>];
- unsigned sz = Outs[OIdx].Flags.getByValSize();
- SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
- unsigned ArgAlign = Outs[OIdx].Flags.getByValAlign();
- // The ByValAlign in the Outs[OIdx].Flags is alway set at this point,
- // so we don't need to worry about natural alignment or not.
- // See TargetLowering::LowerCallTo().
-
- // Enforce minumum alignment of 4 to work around ptxas miscompile
- // for sm_50+. See corresponding alignment adjustment in
- // emitFunctionParamList() for details.
- if (ArgAlign < 4)
- ArgAlign = 4;
- SDValue DeclareParamOps[] = {Chain, DAG.getConstant(ArgAlign, dl, MVT::i32),
- DAG.getConstant(paramCount, dl, MVT::i32),
- DAG.getConstant(sz, dl, MVT::i32), InFlag};
- Chain = DAG.getNode(NVPTXISD::DeclareParam, dl, DeclareParamVTs,
- DeclareParamOps);
- InFlag = Chain.getValue(1);
- for (unsigned j = 0, je = VTs.size(); j != je; ++j) {
- EVT elemtype = VTs[j];
- int curOffset = Offsets[j];
- unsigned PartAlign = GreatestCommonDivisor64(ArgAlign, curOffset);
- auto PtrVT = getPointerTy(DL);
- SDValue srcAddr = DAG.getNode(ISD::ADD, dl, PtrVT, OutVals[OIdx],
- DAG.getConstant(curOffset, dl, PtrVT));
- SDValue theVal = DAG.getLoad(elemtype, dl, tempChain, srcAddr,
- MachinePointerInfo(), PartAlign);
- if (elemtype.getSizeInBits() < 16) {
- theVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, theVal);
- }
- SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
- SDValue CopyParamOps[] = { Chain,
- DAG.getConstant(paramCount, dl, MVT::i32),
- DAG.getConstant(curOffset, dl, MVT::i32),
- theVal, InFlag };
- Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreParam, dl, CopyParamVTs,
- CopyParamOps, elemtype,
- MachinePointerInfo());
-
- InFlag = Chain.getValue(1);
- }
- ++paramCount;
- }
-
- GlobalAddressSDNode *Func = dyn_cast<GlobalAddressSDNode>(Callee.getNode());
- unsigned retAlignment = 0;
-
- // Handle Result
- if (Ins.size() > 0) {
- SmallVector<EVT, 16> resvtparts;
- ComputeValueVTs(*this, DL, RetTy, resvtparts);
-
- // Declare
- // .param .align 16 .b8 retval0[<size-in-bytes>], or
- // .param .b<size-in-bits> retval0
- unsigned resultsz = DL.getTypeAllocSizeInBits(RetTy);
- // Emit ".param .b<size-in-bits> retval0" instead of byte arrays only for
- // these three types to match the logic in
- // NVPTXAsmPrinter::printReturnValStr and NVPTXTargetLowering::getPrototype.
- // Plus, this behavior is consistent with nvcc's.
- if (RetTy->isFloatingPointTy() || RetTy->isIntegerTy() ||
- RetTy->isPointerTy()) {
- // Scalar needs to be at least 32bit wide
- if (resultsz < 32)
- resultsz = 32;
- SDVTList DeclareRetVTs = DAG.getVTList(MVT::Other, MVT::Glue);
- SDValue DeclareRetOps[] = { Chain, DAG.getConstant(1, dl, MVT::i32),
- DAG.getConstant(resultsz, dl, MVT::i32),
- DAG.getConstant(0, dl, MVT::i32), InFlag };
- Chain = DAG.getNode(NVPTXISD::DeclareRet, dl, DeclareRetVTs,
- DeclareRetOps);
- InFlag = Chain.getValue(1);
- } else {
- retAlignment = getArgumentAlignment(Callee, CS, RetTy, 0, DL);
- SDVTList DeclareRetVTs = DAG.getVTList(MVT::Other, MVT::Glue);
- SDValue DeclareRetOps[] = { Chain,
- DAG.getConstant(retAlignment, dl, MVT::i32),
- DAG.getConstant(resultsz / 8, dl, MVT::i32),
- DAG.getConstant(0, dl, MVT::i32), InFlag };
- Chain = DAG.getNode(NVPTXISD::DeclareRetParam, dl, DeclareRetVTs,
- DeclareRetOps);
- InFlag = Chain.getValue(1);
- }
- }
-
- if (!Func) {
- // This is indirect function call case : PTX requires a prototype of the
- // form
- // proto_0 : .callprototype(.param .b32 _) _ (.param .b32 _);
- // to be emitted, and the label has to used as the last arg of call
- // instruction.
- // The prototype is embedded in a string and put as the operand for a
- // CallPrototype SDNode which will print out to the value of the string.
- SDVTList ProtoVTs = DAG.getVTList(MVT::Other, MVT::Glue);
- std::string Proto = getPrototype(DL, RetTy, Args, Outs, retAlignment, CS);
- const char *ProtoStr =
- nvTM->getManagedStrPool()->getManagedString(Proto.c_str())->c_str();
- SDValue ProtoOps[] = {
- Chain, DAG.getTargetExternalSymbol(ProtoStr, MVT::i32), InFlag,
- };
- Chain = DAG.getNode(NVPTXISD::CallPrototype, dl, ProtoVTs, ProtoOps);
- InFlag = Chain.getValue(1);
- }
- // Op to just print "call"
- SDVTList PrintCallVTs = DAG.getVTList(MVT::Other, MVT::Glue);
- SDValue PrintCallOps[] = {
- Chain, DAG.getConstant((Ins.size() == 0) ? 0 : 1, dl, MVT::i32), InFlag
- };
- // We model convergent calls as separate opcodes.
- unsigned Opcode = Func ? NVPTXISD::PrintCallUni : NVPTXISD::PrintCall;
- if (CLI.IsConvergent)
- Opcode = Opcode == NVPTXISD::PrintCallUni ? NVPTXISD::PrintConvergentCallUni
- : NVPTXISD::PrintConvergentCall;
- Chain = DAG.getNode(Opcode, dl, PrintCallVTs, PrintCallOps);
- InFlag = Chain.getValue(1);
-
- // Ops to print out the function name
- SDVTList CallVoidVTs = DAG.getVTList(MVT::Other, MVT::Glue);
- SDValue CallVoidOps[] = { Chain, Callee, InFlag };
- Chain = DAG.getNode(NVPTXISD::CallVoid, dl, CallVoidVTs, CallVoidOps);
- InFlag = Chain.getValue(1);
-
- // Ops to print out the param list
- SDVTList CallArgBeginVTs = DAG.getVTList(MVT::Other, MVT::Glue);
- SDValue CallArgBeginOps[] = { Chain, InFlag };
- Chain = DAG.getNode(NVPTXISD::CallArgBegin, dl, CallArgBeginVTs,
- CallArgBeginOps);
- InFlag = Chain.getValue(1);
-
- for (unsigned i = 0, e = paramCount; i != e; ++i) {
- unsigned opcode;
- if (i == (e - 1))
- opcode = NVPTXISD::LastCallArg;
- else
- opcode = NVPTXISD::CallArg;
- SDVTList CallArgVTs = DAG.getVTList(MVT::Other, MVT::Glue);
- SDValue CallArgOps[] = { Chain, DAG.getConstant(1, dl, MVT::i32),
- DAG.getConstant(i, dl, MVT::i32), InFlag };
- Chain = DAG.getNode(opcode, dl, CallArgVTs, CallArgOps);
- InFlag = Chain.getValue(1);
- }
- SDVTList CallArgEndVTs = DAG.getVTList(MVT::Other, MVT::Glue);
- SDValue CallArgEndOps[] = { Chain,
- DAG.getConstant(Func ? 1 : 0, dl, MVT::i32),
- InFlag };
- Chain = DAG.getNode(NVPTXISD::CallArgEnd, dl, CallArgEndVTs, CallArgEndOps);
- InFlag = Chain.getValue(1);
-
- if (!Func) {
- SDVTList PrototypeVTs = DAG.getVTList(MVT::Other, MVT::Glue);
- SDValue PrototypeOps[] = { Chain,
- DAG.getConstant(uniqueCallSite, dl, MVT::i32),
- InFlag };
- Chain = DAG.getNode(NVPTXISD::Prototype, dl, PrototypeVTs, PrototypeOps);
- InFlag = Chain.getValue(1);
- }
-
- // Generate loads from param memory/moves from registers for result
- if (Ins.size() > 0) {
- SmallVector<EVT, 16> VTs;
- SmallVector<uint64_t, 16> Offsets;
- ComputePTXValueVTs(*this, DL, RetTy, VTs, &Offsets, 0);
- assert(VTs.size() == Ins.size() && "Bad value decomposition");
-
- unsigned RetAlign = getArgumentAlignment(Callee, CS, RetTy, 0, DL);
- auto VectorInfo = VectorizePTXValueVTs(VTs, Offsets, RetAlign);
-
- SmallVector<EVT, 6> LoadVTs;
- int VecIdx = -1; // Index of the first element of the vector.
-
- // PTX Interoperability Guide 3.3(A): [Integer] Values shorter than
- // 32-bits are sign extended or zero extended, depending on whether
- // they are signed or unsigned types.
- bool ExtendIntegerRetVal =
- RetTy->isIntegerTy() && DL.getTypeAllocSizeInBits(RetTy) < 32;
-
- for (unsigned i = 0, e = VTs.size(); i != e; ++i) {
- bool needTruncate = false;
- EVT TheLoadType = VTs[i];
- EVT EltType = Ins[i].VT;
- unsigned EltAlign = GreatestCommonDivisor64(RetAlign, Offsets[i]);
- if (ExtendIntegerRetVal) {
- TheLoadType = MVT::i32;
- EltType = MVT::i32;
- needTruncate = true;
- } else if (TheLoadType.getSizeInBits() < 16) {
- if (VTs[i].isInteger())
- needTruncate = true;
- EltType = MVT::i16;
- }
-
- // Record index of the very first element of the vector.
- if (VectorInfo[i] & PVF_FIRST) {
- assert(VecIdx == -1 && LoadVTs.empty() && "Orphaned operand list.");
- VecIdx = i;
- }
-
- LoadVTs.push_back(EltType);
-
- if (VectorInfo[i] & PVF_LAST) {
- unsigned NumElts = LoadVTs.size();
- LoadVTs.push_back(MVT::Other);
- LoadVTs.push_back(MVT::Glue);
- NVPTXISD::NodeType Op;
- switch (NumElts) {
- case 1:
- Op = NVPTXISD::LoadParam;
- break;
- case 2:
- Op = NVPTXISD::LoadParamV2;
- break;
- case 4:
- Op = NVPTXISD::LoadParamV4;
- break;
- default:
- llvm_unreachable("Invalid vector info.");
- }
-
- SDValue LoadOperands[] = {
- Chain, DAG.getConstant(1, dl, MVT::i32),
- DAG.getConstant(Offsets[VecIdx], dl, MVT::i32), InFlag};
- SDValue RetVal = DAG.getMemIntrinsicNode(
- Op, dl, DAG.getVTList(LoadVTs), LoadOperands, TheLoadType,
- MachinePointerInfo(), EltAlign);
-
- for (unsigned j = 0; j < NumElts; ++j) {
- SDValue Ret = RetVal.getValue(j);
- if (needTruncate)
- Ret = DAG.getNode(ISD::TRUNCATE, dl, Ins[VecIdx + j].VT, Ret);
- InVals.push_back(Ret);
- }
- Chain = RetVal.getValue(NumElts);
- InFlag = RetVal.getValue(NumElts + 1);
-
- // Cleanup
- VecIdx = -1;
- LoadVTs.clear();
- }
- }
- }
-
- Chain = DAG.getCALLSEQ_END(Chain,
- DAG.getIntPtrConstant(uniqueCallSite, dl, true),
- DAG.getIntPtrConstant(uniqueCallSite + 1, dl,
- true),
- InFlag, dl);
- uniqueCallSite++;
-
- // set isTailCall to false for now, until we figure out how to express
- // tail call optimization in PTX
- isTailCall = false;
- return Chain;
-}
-
-// By default CONCAT_VECTORS is lowered by ExpandVectorBuildThroughStack()
-// (see LegalizeDAG.cpp). This is slow and uses local memory.
-// We use extract/insert/build vector just as what LegalizeOp() does in llvm 2.5
-SDValue
-NVPTXTargetLowering::LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const {
- SDNode *Node = Op.getNode();
- SDLoc dl(Node);
- SmallVector<SDValue, 8> Ops;
- unsigned NumOperands = Node->getNumOperands();
- for (unsigned i = 0; i < NumOperands; ++i) {
- SDValue SubOp = Node->getOperand(i);
- EVT VVT = SubOp.getNode()->getValueType(0);
- EVT EltVT = VVT.getVectorElementType();
- unsigned NumSubElem = VVT.getVectorNumElements();
- for (unsigned j = 0; j < NumSubElem; ++j) {
- Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, SubOp,
- DAG.getIntPtrConstant(j, dl)));
- }
- }
- return DAG.getBuildVector(Node->getValueType(0), dl, Ops);
-}
-
-// We can init constant f16x2 with a single .b32 move. Normally it
-// would get lowered as two constant loads and vector-packing move.
-// mov.b16 %h1, 0x4000;
-// mov.b16 %h2, 0x3C00;
-// mov.b32 %hh2, {%h2, %h1};
-// Instead we want just a constant move:
-// mov.b32 %hh2, 0x40003C00
-//
-// This results in better SASS code with CUDA 7.x. Ptxas in CUDA 8.0
-// generates good SASS in both cases.
-SDValue NVPTXTargetLowering::LowerBUILD_VECTOR(SDValue Op,
- SelectionDAG &DAG) const {
- //return Op;
- if (!(Op->getValueType(0) == MVT::v2f16 &&
- isa<ConstantFPSDNode>(Op->getOperand(0)) &&
- isa<ConstantFPSDNode>(Op->getOperand(1))))
- return Op;
-
- APInt E0 =
- cast<ConstantFPSDNode>(Op->getOperand(0))->getValueAPF().bitcastToAPInt();
- APInt E1 =
- cast<ConstantFPSDNode>(Op->getOperand(1))->getValueAPF().bitcastToAPInt();
- SDValue Const =
- DAG.getConstant(E1.zext(32).shl(16) | E0.zext(32), SDLoc(Op), MVT::i32);
- return DAG.getNode(ISD::BITCAST, SDLoc(Op), MVT::v2f16, Const);
-}
-
-SDValue NVPTXTargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
- SelectionDAG &DAG) const {
- SDValue Index = Op->getOperand(1);
- // Constant index will be matched by tablegen.
- if (isa<ConstantSDNode>(Index.getNode()))
- return Op;
-
- // Extract individual elements and select one of them.
- SDValue Vector = Op->getOperand(0);
- EVT VectorVT = Vector.getValueType();
- assert(VectorVT == MVT::v2f16 && "Unexpected vector type.");
- EVT EltVT = VectorVT.getVectorElementType();
-
- SDLoc dl(Op.getNode());
- SDValue E0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Vector,
- DAG.getIntPtrConstant(0, dl));
- SDValue E1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Vector,
- DAG.getIntPtrConstant(1, dl));
- return DAG.getSelectCC(dl, Index, DAG.getIntPtrConstant(0, dl), E0, E1,
- ISD::CondCode::SETEQ);
-}
-
-/// LowerShiftRightParts - Lower SRL_PARTS, SRA_PARTS, which
-/// 1) returns two i32 values and take a 2 x i32 value to shift plus a shift
-/// amount, or
-/// 2) returns two i64 values and take a 2 x i64 value to shift plus a shift
-/// amount.
-SDValue NVPTXTargetLowering::LowerShiftRightParts(SDValue Op,
- SelectionDAG &DAG) const {
- assert(Op.getNumOperands() == 3 && "Not a double-shift!");
- assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS);
-
- EVT VT = Op.getValueType();
- unsigned VTBits = VT.getSizeInBits();
- SDLoc dl(Op);
- SDValue ShOpLo = Op.getOperand(0);
- SDValue ShOpHi = Op.getOperand(1);
- SDValue ShAmt = Op.getOperand(2);
- unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? ISD::SRA : ISD::SRL;
-
- if (VTBits == 32 && STI.getSmVersion() >= 35) {
- // For 32bit and sm35, we can use the funnel shift 'shf' instruction.
- // {dHi, dLo} = {aHi, aLo} >> Amt
- // dHi = aHi >> Amt
- // dLo = shf.r.clamp aLo, aHi, Amt
-
- SDValue Hi = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt);
- SDValue Lo = DAG.getNode(NVPTXISD::FUN_SHFR_CLAMP, dl, VT, ShOpLo, ShOpHi,
- ShAmt);
-
- SDValue Ops[2] = { Lo, Hi };
- return DAG.getMergeValues(Ops, dl);
- }
- else {
- // {dHi, dLo} = {aHi, aLo} >> Amt
- // - if (Amt>=size) then
- // dLo = aHi >> (Amt-size)
- // dHi = aHi >> Amt (this is either all 0 or all 1)
- // else
- // dLo = (aLo >>logic Amt) | (aHi << (size-Amt))
- // dHi = aHi >> Amt
-
- SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
- DAG.getConstant(VTBits, dl, MVT::i32),
- ShAmt);
- SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, ShAmt);
- SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
- DAG.getConstant(VTBits, dl, MVT::i32));
- SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, RevShAmt);
- SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
- SDValue TrueVal = DAG.getNode(Opc, dl, VT, ShOpHi, ExtraShAmt);
-
- SDValue Cmp = DAG.getSetCC(dl, MVT::i1, ShAmt,
- DAG.getConstant(VTBits, dl, MVT::i32),
- ISD::SETGE);
- SDValue Hi = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt);
- SDValue Lo = DAG.getNode(ISD::SELECT, dl, VT, Cmp, TrueVal, FalseVal);
-
- SDValue Ops[2] = { Lo, Hi };
- return DAG.getMergeValues(Ops, dl);
- }
-}
-
-/// LowerShiftLeftParts - Lower SHL_PARTS, which
-/// 1) returns two i32 values and take a 2 x i32 value to shift plus a shift
-/// amount, or
-/// 2) returns two i64 values and take a 2 x i64 value to shift plus a shift
-/// amount.
-SDValue NVPTXTargetLowering::LowerShiftLeftParts(SDValue Op,
- SelectionDAG &DAG) const {
- assert(Op.getNumOperands() == 3 && "Not a double-shift!");
- assert(Op.getOpcode() == ISD::SHL_PARTS);
-
- EVT VT = Op.getValueType();
- unsigned VTBits = VT.getSizeInBits();
- SDLoc dl(Op);
- SDValue ShOpLo = Op.getOperand(0);
- SDValue ShOpHi = Op.getOperand(1);
- SDValue ShAmt = Op.getOperand(2);
-
- if (VTBits == 32 && STI.getSmVersion() >= 35) {
- // For 32bit and sm35, we can use the funnel shift 'shf' instruction.
- // {dHi, dLo} = {aHi, aLo} << Amt
- // dHi = shf.l.clamp aLo, aHi, Amt
- // dLo = aLo << Amt
-
- SDValue Hi = DAG.getNode(NVPTXISD::FUN_SHFL_CLAMP, dl, VT, ShOpLo, ShOpHi,
- ShAmt);
- SDValue Lo = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt);
-
- SDValue Ops[2] = { Lo, Hi };
- return DAG.getMergeValues(Ops, dl);
- }
- else {
- // {dHi, dLo} = {aHi, aLo} << Amt
- // - if (Amt>=size) then
- // dLo = aLo << Amt (all 0)
- // dLo = aLo << (Amt-size)
- // else
- // dLo = aLo << Amt
- // dHi = (aHi << Amt) | (aLo >> (size-Amt))
-
- SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
- DAG.getConstant(VTBits, dl, MVT::i32),
- ShAmt);
- SDValue Tmp1 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, ShAmt);
- SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
- DAG.getConstant(VTBits, dl, MVT::i32));
- SDValue Tmp2 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, RevShAmt);
- SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
- SDValue TrueVal = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ExtraShAmt);
-
- SDValue Cmp = DAG.getSetCC(dl, MVT::i1, ShAmt,
- DAG.getConstant(VTBits, dl, MVT::i32),
- ISD::SETGE);
- SDValue Lo = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt);
- SDValue Hi = DAG.getNode(ISD::SELECT, dl, VT, Cmp, TrueVal, FalseVal);
-
- SDValue Ops[2] = { Lo, Hi };
- return DAG.getMergeValues(Ops, dl);
- }
-}
-
-SDValue
-NVPTXTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
- switch (Op.getOpcode()) {
- case ISD::RETURNADDR:
- return SDValue();
- case ISD::FRAMEADDR:
- return SDValue();
- case ISD::GlobalAddress:
- return LowerGlobalAddress(Op, DAG);
- case ISD::INTRINSIC_W_CHAIN:
- return Op;
- case ISD::BUILD_VECTOR:
- return LowerBUILD_VECTOR(Op, DAG);
- case ISD::EXTRACT_SUBVECTOR:
- return Op;
- case ISD::EXTRACT_VECTOR_ELT:
- return LowerEXTRACT_VECTOR_ELT(Op, DAG);
- case ISD::CONCAT_VECTORS:
- return LowerCONCAT_VECTORS(Op, DAG);
- case ISD::STORE:
- return LowerSTORE(Op, DAG);
- case ISD::LOAD:
- return LowerLOAD(Op, DAG);
- case ISD::SHL_PARTS:
- return LowerShiftLeftParts(Op, DAG);
- case ISD::SRA_PARTS:
- case ISD::SRL_PARTS:
- return LowerShiftRightParts(Op, DAG);
- case ISD::SELECT:
- return LowerSelect(Op, DAG);
- default:
- llvm_unreachable("Custom lowering not defined for operation");
- }
-}
-
-SDValue NVPTXTargetLowering::LowerSelect(SDValue Op, SelectionDAG &DAG) const {
- SDValue Op0 = Op->getOperand(0);
- SDValue Op1 = Op->getOperand(1);
- SDValue Op2 = Op->getOperand(2);
- SDLoc DL(Op.getNode());
-
- assert(Op.getValueType() == MVT::i1 && "Custom lowering enabled only for i1");
-
- Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Op1);
- Op2 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Op2);
- SDValue Select = DAG.getNode(ISD::SELECT, DL, MVT::i32, Op0, Op1, Op2);
- SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Select);
-
- return Trunc;
-}
-
-SDValue NVPTXTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
- if (Op.getValueType() == MVT::i1)
- return LowerLOADi1(Op, DAG);
-
- // v2f16 is legal, so we can't rely on legalizer to handle unaligned
- // loads and have to handle it here.
- if (Op.getValueType() == MVT::v2f16) {
- LoadSDNode *Load = cast<LoadSDNode>(Op);
- EVT MemVT = Load->getMemoryVT();
- if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), MemVT,
- Load->getAddressSpace(), Load->getAlignment())) {
- SDValue Ops[2];
- std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(Load, DAG);
- return DAG.getMergeValues(Ops, SDLoc(Op));
- }
- }
-
- return SDValue();
-}
-
-// v = ld i1* addr
-// =>
-// v1 = ld i8* addr (-> i16)
-// v = trunc i16 to i1
-SDValue NVPTXTargetLowering::LowerLOADi1(SDValue Op, SelectionDAG &DAG) const {
- SDNode *Node = Op.getNode();
- LoadSDNode *LD = cast<LoadSDNode>(Node);
- SDLoc dl(Node);
- assert(LD->getExtensionType() == ISD::NON_EXTLOAD);
- assert(Node->getValueType(0) == MVT::i1 &&
- "Custom lowering for i1 load only");
- SDValue newLD = DAG.getLoad(MVT::i16, dl, LD->getChain(), LD->getBasePtr(),
- LD->getPointerInfo(), LD->getAlignment(),
- LD->getMemOperand()->getFlags());
- SDValue result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, newLD);
- // The legalizer (the caller) is expecting two values from the legalized
- // load, so we build a MergeValues node for it. See ExpandUnalignedLoad()
- // in LegalizeDAG.cpp which also uses MergeValues.
- SDValue Ops[] = { result, LD->getChain() };
- return DAG.getMergeValues(Ops, dl);
-}
-
-SDValue NVPTXTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
- StoreSDNode *Store = cast<StoreSDNode>(Op);
- EVT VT = Store->getMemoryVT();
-
- if (VT == MVT::i1)
- return LowerSTOREi1(Op, DAG);
-
- // v2f16 is legal, so we can't rely on legalizer to handle unaligned
- // stores and have to handle it here.
- if (VT == MVT::v2f16 &&
- !allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
- Store->getAddressSpace(), Store->getAlignment()))
- return expandUnalignedStore(Store, DAG);
-
- if (VT.isVector())
- return LowerSTOREVector(Op, DAG);
-
- return SDValue();
-}
-
-SDValue
-NVPTXTargetLowering::LowerSTOREVector(SDValue Op, SelectionDAG &DAG) const {
- SDNode *N = Op.getNode();
- SDValue Val = N->getOperand(1);
- SDLoc DL(N);
- EVT ValVT = Val.getValueType();
-
- if (ValVT.isVector()) {
- // We only handle "native" vector sizes for now, e.g. <4 x double> is not
- // legal. We can (and should) split that into 2 stores of <2 x double> here
- // but I'm leaving that as a TODO for now.
- if (!ValVT.isSimple())
- return SDValue();
- switch (ValVT.getSimpleVT().SimpleTy) {
- default:
- return SDValue();
- case MVT::v2i8:
- case MVT::v2i16:
- case MVT::v2i32:
- case MVT::v2i64:
- case MVT::v2f16:
- case MVT::v2f32:
- case MVT::v2f64:
- case MVT::v4i8:
- case MVT::v4i16:
- case MVT::v4i32:
- case MVT::v4f16:
- case MVT::v4f32:
- case MVT::v8f16: // <4 x f16x2>
- // This is a "native" vector type
- break;
- }
-
- MemSDNode *MemSD = cast<MemSDNode>(N);
- const DataLayout &TD = DAG.getDataLayout();
-
- unsigned Align = MemSD->getAlignment();
- unsigned PrefAlign =
- TD.getPrefTypeAlignment(ValVT.getTypeForEVT(*DAG.getContext()));
- if (Align < PrefAlign) {
- // This store is not sufficiently aligned, so bail out and let this vector
- // store be scalarized. Note that we may still be able to emit smaller
- // vector stores. For example, if we are storing a <4 x float> with an
- // alignment of 8, this check will fail but the legalizer will try again
- // with 2 x <2 x float>, which will succeed with an alignment of 8.
- return SDValue();
- }
-
- unsigned Opcode = 0;
- EVT EltVT = ValVT.getVectorElementType();
- unsigned NumElts = ValVT.getVectorNumElements();
-
- // Since StoreV2 is a target node, we cannot rely on DAG type legalization.
- // Therefore, we must ensure the type is legal. For i1 and i8, we set the
- // stored type to i16 and propagate the "real" type as the memory type.
- bool NeedExt = false;
- if (EltVT.getSizeInBits() < 16)
- NeedExt = true;
-
- bool StoreF16x2 = false;
- switch (NumElts) {
- default:
- return SDValue();
- case 2:
- Opcode = NVPTXISD::StoreV2;
- break;
- case 4:
- Opcode = NVPTXISD::StoreV4;
- break;
- case 8:
- // v8f16 is a special case. PTX doesn't have st.v8.f16
- // instruction. Instead, we split the vector into v2f16 chunks and
- // store them with st.v4.b32.
- assert(EltVT == MVT::f16 && "Wrong type for the vector.");
- Opcode = NVPTXISD::StoreV4;
- StoreF16x2 = true;
- break;
- }
-
- SmallVector<SDValue, 8> Ops;
-
- // First is the chain
- Ops.push_back(N->getOperand(0));
-
- if (StoreF16x2) {
- // Combine f16,f16 -> v2f16
- NumElts /= 2;
- for (unsigned i = 0; i < NumElts; ++i) {
- SDValue E0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f16, Val,
- DAG.getIntPtrConstant(i * 2, DL));
- SDValue E1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f16, Val,
- DAG.getIntPtrConstant(i * 2 + 1, DL));
- SDValue V2 = DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v2f16, E0, E1);
- Ops.push_back(V2);
- }
- } else {
- // Then the split values
- for (unsigned i = 0; i < NumElts; ++i) {
- SDValue ExtVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Val,
- DAG.getIntPtrConstant(i, DL));
- if (NeedExt)
- ExtVal = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i16, ExtVal);
- Ops.push_back(ExtVal);
- }
- }
-
- // Then any remaining arguments
- Ops.append(N->op_begin() + 2, N->op_end());
-
- SDValue NewSt =
- DAG.getMemIntrinsicNode(Opcode, DL, DAG.getVTList(MVT::Other), Ops,
- MemSD->getMemoryVT(), MemSD->getMemOperand());
-
- // return DCI.CombineTo(N, NewSt, true);
- return NewSt;
- }
-
- return SDValue();
-}
-
-// st i1 v, addr
-// =>
-// v1 = zxt v to i16
-// st.u8 i16, addr
-SDValue NVPTXTargetLowering::LowerSTOREi1(SDValue Op, SelectionDAG &DAG) const {
- SDNode *Node = Op.getNode();
- SDLoc dl(Node);
- StoreSDNode *ST = cast<StoreSDNode>(Node);
- SDValue Tmp1 = ST->getChain();
- SDValue Tmp2 = ST->getBasePtr();
- SDValue Tmp3 = ST->getValue();
- assert(Tmp3.getValueType() == MVT::i1 && "Custom lowering for i1 store only");
- Tmp3 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Tmp3);
- SDValue Result =
- DAG.getTruncStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(), MVT::i8,
- ST->getAlignment(), ST->getMemOperand()->getFlags());
- return Result;
-}
-
-SDValue
-NVPTXTargetLowering::getParamSymbol(SelectionDAG &DAG, int idx, EVT v) const {
- std::string ParamSym;
- raw_string_ostream ParamStr(ParamSym);
-
- ParamStr << DAG.getMachineFunction().getName() << "_param_" << idx;
- ParamStr.flush();
-
- std::string *SavedStr =
- nvTM->getManagedStrPool()->getManagedString(ParamSym.c_str());
- return DAG.getTargetExternalSymbol(SavedStr->c_str(), v);
-}
-
-// Check to see if the kernel argument is image*_t or sampler_t
-
-static bool isImageOrSamplerVal(const Value *arg, const Module *context) {
- static const char *const specialTypes[] = { "struct._image2d_t",
- "struct._image3d_t",
- "struct._sampler_t" };
-
- Type *Ty = arg->getType();
- auto *PTy = dyn_cast<PointerType>(Ty);
-
- if (!PTy)
- return false;
-
- if (!context)
- return false;
-
- auto *STy = dyn_cast<StructType>(PTy->getElementType());
- if (!STy || STy->isLiteral())
- return false;
-
- return std::find(std::begin(specialTypes), std::end(specialTypes),
- STy->getName()) != std::end(specialTypes);
-}
-
-SDValue NVPTXTargetLowering::LowerFormalArguments(
- SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
- SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
- MachineFunction &MF = DAG.getMachineFunction();
- const DataLayout &DL = DAG.getDataLayout();
- auto PtrVT = getPointerTy(DAG.getDataLayout());
-
- const Function *F = MF.getFunction();
- const AttributeList &PAL = F->getAttributes();
- const TargetLowering *TLI = STI.getTargetLowering();
-
- SDValue Root = DAG.getRoot();
- std::vector<SDValue> OutChains;
-
- bool isABI = (STI.getSmVersion() >= 20);
- assert(isABI && "Non-ABI compilation is not supported");
- if (!isABI)
- return Chain;
-
- std::vector<Type *> argTypes;
- std::vector<const Argument *> theArgs;
- for (const Argument &I : F->args()) {
- theArgs.push_back(&I);
- argTypes.push_back(I.getType());
- }
- // argTypes.size() (or theArgs.size()) and Ins.size() need not match.
- // Ins.size() will be larger
- // * if there is an aggregate argument with multiple fields (each field
- // showing up separately in Ins)
- // * if there is a vector argument with more than typical vector-length
- // elements (generally if more than 4) where each vector element is
- // individually present in Ins.
- // So a different index should be used for indexing into Ins.
- // See similar issue in LowerCall.
- unsigned InsIdx = 0;
-
- int idx = 0;
- for (unsigned i = 0, e = theArgs.size(); i != e; ++i, ++idx, ++InsIdx) {
- Type *Ty = argTypes[i];
-
- // If the kernel argument is image*_t or sampler_t, convert it to
- // a i32 constant holding the parameter position. This can later
- // matched in the AsmPrinter to output the correct mangled name.
- if (isImageOrSamplerVal(
- theArgs[i],
- (theArgs[i]->getParent() ? theArgs[i]->getParent()->getParent()
- : nullptr))) {
- assert(isKernelFunction(*F) &&
- "Only kernels can have image/sampler params");
- InVals.push_back(DAG.getConstant(i + 1, dl, MVT::i32));
- continue;
- }
-
- if (theArgs[i]->use_empty()) {
- // argument is dead
- if (Ty->isAggregateType()) {
- SmallVector<EVT, 16> vtparts;
-
- ComputePTXValueVTs(*this, DAG.getDataLayout(), Ty, vtparts);
- assert(vtparts.size() > 0 && "empty aggregate type not expected");
- for (unsigned parti = 0, parte = vtparts.size(); parti != parte;
- ++parti) {
- InVals.push_back(DAG.getNode(ISD::UNDEF, dl, Ins[InsIdx].VT));
- ++InsIdx;
- }
- if (vtparts.size() > 0)
- --InsIdx;
- continue;
- }
- if (Ty->isVectorTy()) {
- EVT ObjectVT = getValueType(DL, Ty);
- unsigned NumRegs = TLI->getNumRegisters(F->getContext(), ObjectVT);
- for (unsigned parti = 0; parti < NumRegs; ++parti) {
- InVals.push_back(DAG.getNode(ISD::UNDEF, dl, Ins[InsIdx].VT));
- ++InsIdx;
- }
- if (NumRegs > 0)
- --InsIdx;
- continue;
- }
- InVals.push_back(DAG.getNode(ISD::UNDEF, dl, Ins[InsIdx].VT));
- continue;
- }
-
- // In the following cases, assign a node order of "idx+1"
- // to newly created nodes. The SDNodes for params have to
- // appear in the same order as their order of appearance
- // in the original function. "idx+1" holds that order.
- if (!PAL.hasParamAttribute(i, Attribute::ByVal)) {
- bool aggregateIsPacked = false;
- if (StructType *STy = dyn_cast<StructType>(Ty))
- aggregateIsPacked = STy->isPacked();
-
- SmallVector<EVT, 16> VTs;
- SmallVector<uint64_t, 16> Offsets;
- ComputePTXValueVTs(*this, DL, Ty, VTs, &Offsets, 0);
- assert(VTs.size() > 0 && "Unexpected empty type.");
- auto VectorInfo =
- VectorizePTXValueVTs(VTs, Offsets, DL.getABITypeAlignment(Ty));
-
- SDValue Arg = getParamSymbol(DAG, idx, PtrVT);
- int VecIdx = -1; // Index of the first element of the current vector.
- for (unsigned parti = 0, parte = VTs.size(); parti != parte; ++parti) {
- if (VectorInfo[parti] & PVF_FIRST) {
- assert(VecIdx == -1 && "Orphaned vector.");
- VecIdx = parti;
- }
-
- // That's the last element of this store op.
- if (VectorInfo[parti] & PVF_LAST) {
- unsigned NumElts = parti - VecIdx + 1;
- EVT EltVT = VTs[parti];
- // i1 is loaded/stored as i8.
- EVT LoadVT = EltVT;
- if (EltVT == MVT::i1)
- LoadVT = MVT::i8;
- else if (EltVT == MVT::v2f16)
- // getLoad needs a vector type, but it can't handle
- // vectors which contain v2f16 elements. So we must load
- // using i32 here and then bitcast back.
- LoadVT = MVT::i32;
-
- EVT VecVT = EVT::getVectorVT(F->getContext(), LoadVT, NumElts);
- SDValue VecAddr =
- DAG.getNode(ISD::ADD, dl, PtrVT, Arg,
- DAG.getConstant(Offsets[VecIdx], dl, PtrVT));
- Value *srcValue = Constant::getNullValue(PointerType::get(
- EltVT.getTypeForEVT(F->getContext()), ADDRESS_SPACE_PARAM));
- SDValue P =
- DAG.getLoad(VecVT, dl, Root, VecAddr,
- MachinePointerInfo(srcValue), aggregateIsPacked,
- MachineMemOperand::MODereferenceable |
- MachineMemOperand::MOInvariant);
- if (P.getNode())
- P.getNode()->setIROrder(idx + 1);
- for (unsigned j = 0; j < NumElts; ++j) {
- SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, LoadVT, P,
- DAG.getIntPtrConstant(j, dl));
- // We've loaded i1 as an i8 and now must truncate it back to i1
- if (EltVT == MVT::i1)
- Elt = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Elt);
- // v2f16 was loaded as an i32. Now we must bitcast it back.
- else if (EltVT == MVT::v2f16)
- Elt = DAG.getNode(ISD::BITCAST, dl, MVT::v2f16, Elt);
- // Extend the element if necesary (e.g. an i8 is loaded
- // into an i16 register)
- if (Ins[InsIdx].VT.isInteger() &&
- Ins[InsIdx].VT.getSizeInBits() > LoadVT.getSizeInBits()) {
- unsigned Extend = Ins[InsIdx].Flags.isSExt() ? ISD::SIGN_EXTEND
- : ISD::ZERO_EXTEND;
- Elt = DAG.getNode(Extend, dl, Ins[InsIdx].VT, Elt);
- }
- InVals.push_back(Elt);
- }
-
- // Reset vector tracking state.
- VecIdx = -1;
- }
- ++InsIdx;
- }
- if (VTs.size() > 0)
- --InsIdx;
- continue;
- }
-
- // Param has ByVal attribute
- // Return MoveParam(param symbol).
- // Ideally, the param symbol can be returned directly,
- // but when SDNode builder decides to use it in a CopyToReg(),
- // machine instruction fails because TargetExternalSymbol
- // (not lowered) is target dependent, and CopyToReg assumes
- // the source is lowered.
- EVT ObjectVT = getValueType(DL, Ty);
- assert(ObjectVT == Ins[InsIdx].VT &&
- "Ins type did not match function type");
- SDValue Arg = getParamSymbol(DAG, idx, PtrVT);
- SDValue p = DAG.getNode(NVPTXISD::MoveParam, dl, ObjectVT, Arg);
- if (p.getNode())
- p.getNode()->setIROrder(idx + 1);
- InVals.push_back(p);
- }
-
- // Clang will check explicit VarArg and issue error if any. However, Clang
- // will let code with
- // implicit var arg like f() pass. See bug 617733.
- // We treat this case as if the arg list is empty.
- // if (F.isVarArg()) {
- // assert(0 && "VarArg not supported yet!");
- //}
-
- if (!OutChains.empty())
- DAG.setRoot(DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains));
-
- return Chain;
-}
-
-SDValue
-NVPTXTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
- bool isVarArg,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- const SDLoc &dl, SelectionDAG &DAG) const {
- MachineFunction &MF = DAG.getMachineFunction();
- Type *RetTy = MF.getFunction()->getReturnType();
-
- bool isABI = (STI.getSmVersion() >= 20);
- assert(isABI && "Non-ABI compilation is not supported");
- if (!isABI)
- return Chain;
-
- const DataLayout DL = DAG.getDataLayout();
- SmallVector<EVT, 16> VTs;
- SmallVector<uint64_t, 16> Offsets;
- ComputePTXValueVTs(*this, DL, RetTy, VTs, &Offsets);
- assert(VTs.size() == OutVals.size() && "Bad return value decomposition");
-
- auto VectorInfo = VectorizePTXValueVTs(
- VTs, Offsets, RetTy->isSized() ? DL.getABITypeAlignment(RetTy) : 1);
-
- // PTX Interoperability Guide 3.3(A): [Integer] Values shorter than
- // 32-bits are sign extended or zero extended, depending on whether
- // they are signed or unsigned types.
- bool ExtendIntegerRetVal =
- RetTy->isIntegerTy() && DL.getTypeAllocSizeInBits(RetTy) < 32;
-
- SmallVector<SDValue, 6> StoreOperands;
- for (unsigned i = 0, e = VTs.size(); i != e; ++i) {
- // New load/store. Record chain and offset operands.
- if (VectorInfo[i] & PVF_FIRST) {
- assert(StoreOperands.empty() && "Orphaned operand list.");
- StoreOperands.push_back(Chain);
- StoreOperands.push_back(DAG.getConstant(Offsets[i], dl, MVT::i32));
- }
-
- SDValue RetVal = OutVals[i];
- if (ExtendIntegerRetVal) {
- RetVal = DAG.getNode(Outs[i].Flags.isSExt() ? ISD::SIGN_EXTEND
- : ISD::ZERO_EXTEND,
- dl, MVT::i32, RetVal);
- } else if (RetVal.getValueSizeInBits() < 16) {
- // Use 16-bit registers for small load-stores as it's the
- // smallest general purpose register size supported by NVPTX.
- RetVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, RetVal);
- }
-
- // Record the value to return.
- StoreOperands.push_back(RetVal);
-
- // That's the last element of this store op.
- if (VectorInfo[i] & PVF_LAST) {
- NVPTXISD::NodeType Op;
- unsigned NumElts = StoreOperands.size() - 2;
- switch (NumElts) {
- case 1:
- Op = NVPTXISD::StoreRetval;
- break;
- case 2:
- Op = NVPTXISD::StoreRetvalV2;
- break;
- case 4:
- Op = NVPTXISD::StoreRetvalV4;
- break;
- default:
- llvm_unreachable("Invalid vector info.");
- }
-
- // Adjust type of load/store op if we've extended the scalar
- // return value.
- EVT TheStoreType = ExtendIntegerRetVal ? MVT::i32 : VTs[i];
- Chain = DAG.getMemIntrinsicNode(Op, dl, DAG.getVTList(MVT::Other),
- StoreOperands, TheStoreType,
- MachinePointerInfo(), 1);
- // Cleanup vector state.
- StoreOperands.clear();
- }
- }
-
- return DAG.getNode(NVPTXISD::RET_FLAG, dl, MVT::Other, Chain);
-}
-
-void NVPTXTargetLowering::LowerAsmOperandForConstraint(
- SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
- SelectionDAG &DAG) const {
- if (Constraint.length() > 1)
- return;
- else
- TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
-}
-
-static unsigned getOpcForTextureInstr(unsigned Intrinsic) {
- switch (Intrinsic) {
- default:
- return 0;
-
- case Intrinsic::nvvm_tex_1d_v4f32_s32:
- return NVPTXISD::Tex1DFloatS32;
- case Intrinsic::nvvm_tex_1d_v4f32_f32:
- return NVPTXISD::Tex1DFloatFloat;
- case Intrinsic::nvvm_tex_1d_level_v4f32_f32:
- return NVPTXISD::Tex1DFloatFloatLevel;
- case Intrinsic::nvvm_tex_1d_grad_v4f32_f32:
- return NVPTXISD::Tex1DFloatFloatGrad;
- case Intrinsic::nvvm_tex_1d_v4s32_s32:
- return NVPTXISD::Tex1DS32S32;
- case Intrinsic::nvvm_tex_1d_v4s32_f32:
- return NVPTXISD::Tex1DS32Float;
- case Intrinsic::nvvm_tex_1d_level_v4s32_f32:
- return NVPTXISD::Tex1DS32FloatLevel;
- case Intrinsic::nvvm_tex_1d_grad_v4s32_f32:
- return NVPTXISD::Tex1DS32FloatGrad;
- case Intrinsic::nvvm_tex_1d_v4u32_s32:
- return NVPTXISD::Tex1DU32S32;
- case Intrinsic::nvvm_tex_1d_v4u32_f32:
- return NVPTXISD::Tex1DU32Float;
- case Intrinsic::nvvm_tex_1d_level_v4u32_f32:
- return NVPTXISD::Tex1DU32FloatLevel;
- case Intrinsic::nvvm_tex_1d_grad_v4u32_f32:
- return NVPTXISD::Tex1DU32FloatGrad;
-
- case Intrinsic::nvvm_tex_1d_array_v4f32_s32:
- return NVPTXISD::Tex1DArrayFloatS32;
- case Intrinsic::nvvm_tex_1d_array_v4f32_f32:
- return NVPTXISD::Tex1DArrayFloatFloat;
- case Intrinsic::nvvm_tex_1d_array_level_v4f32_f32:
- return NVPTXISD::Tex1DArrayFloatFloatLevel;
- case Intrinsic::nvvm_tex_1d_array_grad_v4f32_f32:
- return NVPTXISD::Tex1DArrayFloatFloatGrad;
- case Intrinsic::nvvm_tex_1d_array_v4s32_s32:
- return NVPTXISD::Tex1DArrayS32S32;
- case Intrinsic::nvvm_tex_1d_array_v4s32_f32:
- return NVPTXISD::Tex1DArrayS32Float;
- case Intrinsic::nvvm_tex_1d_array_level_v4s32_f32:
- return NVPTXISD::Tex1DArrayS32FloatLevel;
- case Intrinsic::nvvm_tex_1d_array_grad_v4s32_f32:
- return NVPTXISD::Tex1DArrayS32FloatGrad;
- case Intrinsic::nvvm_tex_1d_array_v4u32_s32:
- return NVPTXISD::Tex1DArrayU32S32;
- case Intrinsic::nvvm_tex_1d_array_v4u32_f32:
- return NVPTXISD::Tex1DArrayU32Float;
- case Intrinsic::nvvm_tex_1d_array_level_v4u32_f32:
- return NVPTXISD::Tex1DArrayU32FloatLevel;
- case Intrinsic::nvvm_tex_1d_array_grad_v4u32_f32:
- return NVPTXISD::Tex1DArrayU32FloatGrad;
-
- case Intrinsic::nvvm_tex_2d_v4f32_s32:
- return NVPTXISD::Tex2DFloatS32;
- case Intrinsic::nvvm_tex_2d_v4f32_f32:
- return NVPTXISD::Tex2DFloatFloat;
- case Intrinsic::nvvm_tex_2d_level_v4f32_f32:
- return NVPTXISD::Tex2DFloatFloatLevel;
- case Intrinsic::nvvm_tex_2d_grad_v4f32_f32:
- return NVPTXISD::Tex2DFloatFloatGrad;
- case Intrinsic::nvvm_tex_2d_v4s32_s32:
- return NVPTXISD::Tex2DS32S32;
- case Intrinsic::nvvm_tex_2d_v4s32_f32:
- return NVPTXISD::Tex2DS32Float;
- case Intrinsic::nvvm_tex_2d_level_v4s32_f32:
- return NVPTXISD::Tex2DS32FloatLevel;
- case Intrinsic::nvvm_tex_2d_grad_v4s32_f32:
- return NVPTXISD::Tex2DS32FloatGrad;
- case Intrinsic::nvvm_tex_2d_v4u32_s32:
- return NVPTXISD::Tex2DU32S32;
- case Intrinsic::nvvm_tex_2d_v4u32_f32:
- return NVPTXISD::Tex2DU32Float;
- case Intrinsic::nvvm_tex_2d_level_v4u32_f32:
- return NVPTXISD::Tex2DU32FloatLevel;
- case Intrinsic::nvvm_tex_2d_grad_v4u32_f32:
- return NVPTXISD::Tex2DU32FloatGrad;
-
- case Intrinsic::nvvm_tex_2d_array_v4f32_s32:
- return NVPTXISD::Tex2DArrayFloatS32;
- case Intrinsic::nvvm_tex_2d_array_v4f32_f32:
- return NVPTXISD::Tex2DArrayFloatFloat;
- case Intrinsic::nvvm_tex_2d_array_level_v4f32_f32:
- return NVPTXISD::Tex2DArrayFloatFloatLevel;
- case Intrinsic::nvvm_tex_2d_array_grad_v4f32_f32:
- return NVPTXISD::Tex2DArrayFloatFloatGrad;
- case Intrinsic::nvvm_tex_2d_array_v4s32_s32:
- return NVPTXISD::Tex2DArrayS32S32;
- case Intrinsic::nvvm_tex_2d_array_v4s32_f32:
- return NVPTXISD::Tex2DArrayS32Float;
- case Intrinsic::nvvm_tex_2d_array_level_v4s32_f32:
- return NVPTXISD::Tex2DArrayS32FloatLevel;
- case Intrinsic::nvvm_tex_2d_array_grad_v4s32_f32:
- return NVPTXISD::Tex2DArrayS32FloatGrad;
- case Intrinsic::nvvm_tex_2d_array_v4u32_s32:
- return NVPTXISD::Tex2DArrayU32S32;
- case Intrinsic::nvvm_tex_2d_array_v4u32_f32:
- return NVPTXISD::Tex2DArrayU32Float;
- case Intrinsic::nvvm_tex_2d_array_level_v4u32_f32:
- return NVPTXISD::Tex2DArrayU32FloatLevel;
- case Intrinsic::nvvm_tex_2d_array_grad_v4u32_f32:
- return NVPTXISD::Tex2DArrayU32FloatGrad;
-
- case Intrinsic::nvvm_tex_3d_v4f32_s32:
- return NVPTXISD::Tex3DFloatS32;
- case Intrinsic::nvvm_tex_3d_v4f32_f32:
- return NVPTXISD::Tex3DFloatFloat;
- case Intrinsic::nvvm_tex_3d_level_v4f32_f32:
- return NVPTXISD::Tex3DFloatFloatLevel;
- case Intrinsic::nvvm_tex_3d_grad_v4f32_f32:
- return NVPTXISD::Tex3DFloatFloatGrad;
- case Intrinsic::nvvm_tex_3d_v4s32_s32:
- return NVPTXISD::Tex3DS32S32;
- case Intrinsic::nvvm_tex_3d_v4s32_f32:
- return NVPTXISD::Tex3DS32Float;
- case Intrinsic::nvvm_tex_3d_level_v4s32_f32:
- return NVPTXISD::Tex3DS32FloatLevel;
- case Intrinsic::nvvm_tex_3d_grad_v4s32_f32:
- return NVPTXISD::Tex3DS32FloatGrad;
- case Intrinsic::nvvm_tex_3d_v4u32_s32:
- return NVPTXISD::Tex3DU32S32;
- case Intrinsic::nvvm_tex_3d_v4u32_f32:
- return NVPTXISD::Tex3DU32Float;
- case Intrinsic::nvvm_tex_3d_level_v4u32_f32:
- return NVPTXISD::Tex3DU32FloatLevel;
- case Intrinsic::nvvm_tex_3d_grad_v4u32_f32:
- return NVPTXISD::Tex3DU32FloatGrad;
-
- case Intrinsic::nvvm_tex_cube_v4f32_f32:
- return NVPTXISD::TexCubeFloatFloat;
- case Intrinsic::nvvm_tex_cube_level_v4f32_f32:
- return NVPTXISD::TexCubeFloatFloatLevel;
- case Intrinsic::nvvm_tex_cube_v4s32_f32:
- return NVPTXISD::TexCubeS32Float;
- case Intrinsic::nvvm_tex_cube_level_v4s32_f32:
- return NVPTXISD::TexCubeS32FloatLevel;
- case Intrinsic::nvvm_tex_cube_v4u32_f32:
- return NVPTXISD::TexCubeU32Float;
- case Intrinsic::nvvm_tex_cube_level_v4u32_f32:
- return NVPTXISD::TexCubeU32FloatLevel;
-
- case Intrinsic::nvvm_tex_cube_array_v4f32_f32:
- return NVPTXISD::TexCubeArrayFloatFloat;
- case Intrinsic::nvvm_tex_cube_array_level_v4f32_f32:
- return NVPTXISD::TexCubeArrayFloatFloatLevel;
- case Intrinsic::nvvm_tex_cube_array_v4s32_f32:
- return NVPTXISD::TexCubeArrayS32Float;
- case Intrinsic::nvvm_tex_cube_array_level_v4s32_f32:
- return NVPTXISD::TexCubeArrayS32FloatLevel;
- case Intrinsic::nvvm_tex_cube_array_v4u32_f32:
- return NVPTXISD::TexCubeArrayU32Float;
- case Intrinsic::nvvm_tex_cube_array_level_v4u32_f32:
- return NVPTXISD::TexCubeArrayU32FloatLevel;
-
- case Intrinsic::nvvm_tld4_r_2d_v4f32_f32:
- return NVPTXISD::Tld4R2DFloatFloat;
- case Intrinsic::nvvm_tld4_g_2d_v4f32_f32:
- return NVPTXISD::Tld4G2DFloatFloat;
- case Intrinsic::nvvm_tld4_b_2d_v4f32_f32:
- return NVPTXISD::Tld4B2DFloatFloat;
- case Intrinsic::nvvm_tld4_a_2d_v4f32_f32:
- return NVPTXISD::Tld4A2DFloatFloat;
- case Intrinsic::nvvm_tld4_r_2d_v4s32_f32:
- return NVPTXISD::Tld4R2DS64Float;
- case Intrinsic::nvvm_tld4_g_2d_v4s32_f32:
- return NVPTXISD::Tld4G2DS64Float;
- case Intrinsic::nvvm_tld4_b_2d_v4s32_f32:
- return NVPTXISD::Tld4B2DS64Float;
- case Intrinsic::nvvm_tld4_a_2d_v4s32_f32:
- return NVPTXISD::Tld4A2DS64Float;
- case Intrinsic::nvvm_tld4_r_2d_v4u32_f32:
- return NVPTXISD::Tld4R2DU64Float;
- case Intrinsic::nvvm_tld4_g_2d_v4u32_f32:
- return NVPTXISD::Tld4G2DU64Float;
- case Intrinsic::nvvm_tld4_b_2d_v4u32_f32:
- return NVPTXISD::Tld4B2DU64Float;
- case Intrinsic::nvvm_tld4_a_2d_v4u32_f32:
- return NVPTXISD::Tld4A2DU64Float;
-
- case Intrinsic::nvvm_tex_unified_1d_v4f32_s32:
- return NVPTXISD::TexUnified1DFloatS32;
- case Intrinsic::nvvm_tex_unified_1d_v4f32_f32:
- return NVPTXISD::TexUnified1DFloatFloat;
- case Intrinsic::nvvm_tex_unified_1d_level_v4f32_f32:
- return NVPTXISD::TexUnified1DFloatFloatLevel;
- case Intrinsic::nvvm_tex_unified_1d_grad_v4f32_f32:
- return NVPTXISD::TexUnified1DFloatFloatGrad;
- case Intrinsic::nvvm_tex_unified_1d_v4s32_s32:
- return NVPTXISD::TexUnified1DS32S32;
- case Intrinsic::nvvm_tex_unified_1d_v4s32_f32:
- return NVPTXISD::TexUnified1DS32Float;
- case Intrinsic::nvvm_tex_unified_1d_level_v4s32_f32:
- return NVPTXISD::TexUnified1DS32FloatLevel;
- case Intrinsic::nvvm_tex_unified_1d_grad_v4s32_f32:
- return NVPTXISD::TexUnified1DS32FloatGrad;
- case Intrinsic::nvvm_tex_unified_1d_v4u32_s32:
- return NVPTXISD::TexUnified1DU32S32;
- case Intrinsic::nvvm_tex_unified_1d_v4u32_f32:
- return NVPTXISD::TexUnified1DU32Float;
- case Intrinsic::nvvm_tex_unified_1d_level_v4u32_f32:
- return NVPTXISD::TexUnified1DU32FloatLevel;
- case Intrinsic::nvvm_tex_unified_1d_grad_v4u32_f32:
- return NVPTXISD::TexUnified1DU32FloatGrad;
-
- case Intrinsic::nvvm_tex_unified_1d_array_v4f32_s32:
- return NVPTXISD::TexUnified1DArrayFloatS32;
- case Intrinsic::nvvm_tex_unified_1d_array_v4f32_f32:
- return NVPTXISD::TexUnified1DArrayFloatFloat;
- case Intrinsic::nvvm_tex_unified_1d_array_level_v4f32_f32:
- return NVPTXISD::TexUnified1DArrayFloatFloatLevel;
- case Intrinsic::nvvm_tex_unified_1d_array_grad_v4f32_f32:
- return NVPTXISD::TexUnified1DArrayFloatFloatGrad;
- case Intrinsic::nvvm_tex_unified_1d_array_v4s32_s32:
- return NVPTXISD::TexUnified1DArrayS32S32;
- case Intrinsic::nvvm_tex_unified_1d_array_v4s32_f32:
- return NVPTXISD::TexUnified1DArrayS32Float;
- case Intrinsic::nvvm_tex_unified_1d_array_level_v4s32_f32:
- return NVPTXISD::TexUnified1DArrayS32FloatLevel;
- case Intrinsic::nvvm_tex_unified_1d_array_grad_v4s32_f32:
- return NVPTXISD::TexUnified1DArrayS32FloatGrad;
- case Intrinsic::nvvm_tex_unified_1d_array_v4u32_s32:
- return NVPTXISD::TexUnified1DArrayU32S32;
- case Intrinsic::nvvm_tex_unified_1d_array_v4u32_f32:
- return NVPTXISD::TexUnified1DArrayU32Float;
- case Intrinsic::nvvm_tex_unified_1d_array_level_v4u32_f32:
- return NVPTXISD::TexUnified1DArrayU32FloatLevel;
- case Intrinsic::nvvm_tex_unified_1d_array_grad_v4u32_f32:
- return NVPTXISD::TexUnified1DArrayU32FloatGrad;
-
- case Intrinsic::nvvm_tex_unified_2d_v4f32_s32:
- return NVPTXISD::TexUnified2DFloatS32;
- case Intrinsic::nvvm_tex_unified_2d_v4f32_f32:
- return NVPTXISD::TexUnified2DFloatFloat;
- case Intrinsic::nvvm_tex_unified_2d_level_v4f32_f32:
- return NVPTXISD::TexUnified2DFloatFloatLevel;
- case Intrinsic::nvvm_tex_unified_2d_grad_v4f32_f32:
- return NVPTXISD::TexUnified2DFloatFloatGrad;
- case Intrinsic::nvvm_tex_unified_2d_v4s32_s32:
- return NVPTXISD::TexUnified2DS32S32;
- case Intrinsic::nvvm_tex_unified_2d_v4s32_f32:
- return NVPTXISD::TexUnified2DS32Float;
- case Intrinsic::nvvm_tex_unified_2d_level_v4s32_f32:
- return NVPTXISD::TexUnified2DS32FloatLevel;
- case Intrinsic::nvvm_tex_unified_2d_grad_v4s32_f32:
- return NVPTXISD::TexUnified2DS32FloatGrad;
- case Intrinsic::nvvm_tex_unified_2d_v4u32_s32:
- return NVPTXISD::TexUnified2DU32S32;
- case Intrinsic::nvvm_tex_unified_2d_v4u32_f32:
- return NVPTXISD::TexUnified2DU32Float;
- case Intrinsic::nvvm_tex_unified_2d_level_v4u32_f32:
- return NVPTXISD::TexUnified2DU32FloatLevel;
- case Intrinsic::nvvm_tex_unified_2d_grad_v4u32_f32:
- return NVPTXISD::TexUnified2DU32FloatGrad;
-
- case Intrinsic::nvvm_tex_unified_2d_array_v4f32_s32:
- return NVPTXISD::TexUnified2DArrayFloatS32;
- case Intrinsic::nvvm_tex_unified_2d_array_v4f32_f32:
- return NVPTXISD::TexUnified2DArrayFloatFloat;
- case Intrinsic::nvvm_tex_unified_2d_array_level_v4f32_f32:
- return NVPTXISD::TexUnified2DArrayFloatFloatLevel;
- case Intrinsic::nvvm_tex_unified_2d_array_grad_v4f32_f32:
- return NVPTXISD::TexUnified2DArrayFloatFloatGrad;
- case Intrinsic::nvvm_tex_unified_2d_array_v4s32_s32:
- return NVPTXISD::TexUnified2DArrayS32S32;
- case Intrinsic::nvvm_tex_unified_2d_array_v4s32_f32:
- return NVPTXISD::TexUnified2DArrayS32Float;
- case Intrinsic::nvvm_tex_unified_2d_array_level_v4s32_f32:
- return NVPTXISD::TexUnified2DArrayS32FloatLevel;
- case Intrinsic::nvvm_tex_unified_2d_array_grad_v4s32_f32:
- return NVPTXISD::TexUnified2DArrayS32FloatGrad;
- case Intrinsic::nvvm_tex_unified_2d_array_v4u32_s32:
- return NVPTXISD::TexUnified2DArrayU32S32;
- case Intrinsic::nvvm_tex_unified_2d_array_v4u32_f32:
- return NVPTXISD::TexUnified2DArrayU32Float;
- case Intrinsic::nvvm_tex_unified_2d_array_level_v4u32_f32:
- return NVPTXISD::TexUnified2DArrayU32FloatLevel;
- case Intrinsic::nvvm_tex_unified_2d_array_grad_v4u32_f32:
- return NVPTXISD::TexUnified2DArrayU32FloatGrad;
-
- case Intrinsic::nvvm_tex_unified_3d_v4f32_s32:
- return NVPTXISD::TexUnified3DFloatS32;
- case Intrinsic::nvvm_tex_unified_3d_v4f32_f32:
- return NVPTXISD::TexUnified3DFloatFloat;
- case Intrinsic::nvvm_tex_unified_3d_level_v4f32_f32:
- return NVPTXISD::TexUnified3DFloatFloatLevel;
- case Intrinsic::nvvm_tex_unified_3d_grad_v4f32_f32:
- return NVPTXISD::TexUnified3DFloatFloatGrad;
- case Intrinsic::nvvm_tex_unified_3d_v4s32_s32:
- return NVPTXISD::TexUnified3DS32S32;
- case Intrinsic::nvvm_tex_unified_3d_v4s32_f32:
- return NVPTXISD::TexUnified3DS32Float;
- case Intrinsic::nvvm_tex_unified_3d_level_v4s32_f32:
- return NVPTXISD::TexUnified3DS32FloatLevel;
- case Intrinsic::nvvm_tex_unified_3d_grad_v4s32_f32:
- return NVPTXISD::TexUnified3DS32FloatGrad;
- case Intrinsic::nvvm_tex_unified_3d_v4u32_s32:
- return NVPTXISD::TexUnified3DU32S32;
- case Intrinsic::nvvm_tex_unified_3d_v4u32_f32:
- return NVPTXISD::TexUnified3DU32Float;
- case Intrinsic::nvvm_tex_unified_3d_level_v4u32_f32:
- return NVPTXISD::TexUnified3DU32FloatLevel;
- case Intrinsic::nvvm_tex_unified_3d_grad_v4u32_f32:
- return NVPTXISD::TexUnified3DU32FloatGrad;
-
- case Intrinsic::nvvm_tex_unified_cube_v4f32_f32:
- return NVPTXISD::TexUnifiedCubeFloatFloat;
- case Intrinsic::nvvm_tex_unified_cube_level_v4f32_f32:
- return NVPTXISD::TexUnifiedCubeFloatFloatLevel;
- case Intrinsic::nvvm_tex_unified_cube_v4s32_f32:
- return NVPTXISD::TexUnifiedCubeS32Float;
- case Intrinsic::nvvm_tex_unified_cube_level_v4s32_f32:
- return NVPTXISD::TexUnifiedCubeS32FloatLevel;
- case Intrinsic::nvvm_tex_unified_cube_v4u32_f32:
- return NVPTXISD::TexUnifiedCubeU32Float;
- case Intrinsic::nvvm_tex_unified_cube_level_v4u32_f32:
- return NVPTXISD::TexUnifiedCubeU32FloatLevel;
-
- case Intrinsic::nvvm_tex_unified_cube_array_v4f32_f32:
- return NVPTXISD::TexUnifiedCubeArrayFloatFloat;
- case Intrinsic::nvvm_tex_unified_cube_array_level_v4f32_f32:
- return NVPTXISD::TexUnifiedCubeArrayFloatFloatLevel;
- case Intrinsic::nvvm_tex_unified_cube_array_v4s32_f32:
- return NVPTXISD::TexUnifiedCubeArrayS32Float;
- case Intrinsic::nvvm_tex_unified_cube_array_level_v4s32_f32:
- return NVPTXISD::TexUnifiedCubeArrayS32FloatLevel;
- case Intrinsic::nvvm_tex_unified_cube_array_v4u32_f32:
- return NVPTXISD::TexUnifiedCubeArrayU32Float;
- case Intrinsic::nvvm_tex_unified_cube_array_level_v4u32_f32:
- return NVPTXISD::TexUnifiedCubeArrayU32FloatLevel;
-
- case Intrinsic::nvvm_tld4_unified_r_2d_v4f32_f32:
- return NVPTXISD::Tld4UnifiedR2DFloatFloat;
- case Intrinsic::nvvm_tld4_unified_g_2d_v4f32_f32:
- return NVPTXISD::Tld4UnifiedG2DFloatFloat;
- case Intrinsic::nvvm_tld4_unified_b_2d_v4f32_f32:
- return NVPTXISD::Tld4UnifiedB2DFloatFloat;
- case Intrinsic::nvvm_tld4_unified_a_2d_v4f32_f32:
- return NVPTXISD::Tld4UnifiedA2DFloatFloat;
- case Intrinsic::nvvm_tld4_unified_r_2d_v4s32_f32:
- return NVPTXISD::Tld4UnifiedR2DS64Float;
- case Intrinsic::nvvm_tld4_unified_g_2d_v4s32_f32:
- return NVPTXISD::Tld4UnifiedG2DS64Float;
- case Intrinsic::nvvm_tld4_unified_b_2d_v4s32_f32:
- return NVPTXISD::Tld4UnifiedB2DS64Float;
- case Intrinsic::nvvm_tld4_unified_a_2d_v4s32_f32:
- return NVPTXISD::Tld4UnifiedA2DS64Float;
- case Intrinsic::nvvm_tld4_unified_r_2d_v4u32_f32:
- return NVPTXISD::Tld4UnifiedR2DU64Float;
- case Intrinsic::nvvm_tld4_unified_g_2d_v4u32_f32:
- return NVPTXISD::Tld4UnifiedG2DU64Float;
- case Intrinsic::nvvm_tld4_unified_b_2d_v4u32_f32:
- return NVPTXISD::Tld4UnifiedB2DU64Float;
- case Intrinsic::nvvm_tld4_unified_a_2d_v4u32_f32:
- return NVPTXISD::Tld4UnifiedA2DU64Float;
- }
-}
-
-static unsigned getOpcForSurfaceInstr(unsigned Intrinsic) {
- switch (Intrinsic) {
- default:
- return 0;
- case Intrinsic::nvvm_suld_1d_i8_clamp:
- return NVPTXISD::Suld1DI8Clamp;
- case Intrinsic::nvvm_suld_1d_i16_clamp:
- return NVPTXISD::Suld1DI16Clamp;
- case Intrinsic::nvvm_suld_1d_i32_clamp:
- return NVPTXISD::Suld1DI32Clamp;
- case Intrinsic::nvvm_suld_1d_i64_clamp:
- return NVPTXISD::Suld1DI64Clamp;
- case Intrinsic::nvvm_suld_1d_v2i8_clamp:
- return NVPTXISD::Suld1DV2I8Clamp;
- case Intrinsic::nvvm_suld_1d_v2i16_clamp:
- return NVPTXISD::Suld1DV2I16Clamp;
- case Intrinsic::nvvm_suld_1d_v2i32_clamp:
- return NVPTXISD::Suld1DV2I32Clamp;
- case Intrinsic::nvvm_suld_1d_v2i64_clamp:
- return NVPTXISD::Suld1DV2I64Clamp;
- case Intrinsic::nvvm_suld_1d_v4i8_clamp:
- return NVPTXISD::Suld1DV4I8Clamp;
- case Intrinsic::nvvm_suld_1d_v4i16_clamp:
- return NVPTXISD::Suld1DV4I16Clamp;
- case Intrinsic::nvvm_suld_1d_v4i32_clamp:
- return NVPTXISD::Suld1DV4I32Clamp;
- case Intrinsic::nvvm_suld_1d_array_i8_clamp:
- return NVPTXISD::Suld1DArrayI8Clamp;
- case Intrinsic::nvvm_suld_1d_array_i16_clamp:
- return NVPTXISD::Suld1DArrayI16Clamp;
- case Intrinsic::nvvm_suld_1d_array_i32_clamp:
- return NVPTXISD::Suld1DArrayI32Clamp;
- case Intrinsic::nvvm_suld_1d_array_i64_clamp:
- return NVPTXISD::Suld1DArrayI64Clamp;
- case Intrinsic::nvvm_suld_1d_array_v2i8_clamp:
- return NVPTXISD::Suld1DArrayV2I8Clamp;
- case Intrinsic::nvvm_suld_1d_array_v2i16_clamp:
- return NVPTXISD::Suld1DArrayV2I16Clamp;
- case Intrinsic::nvvm_suld_1d_array_v2i32_clamp:
- return NVPTXISD::Suld1DArrayV2I32Clamp;
- case Intrinsic::nvvm_suld_1d_array_v2i64_clamp:
- return NVPTXISD::Suld1DArrayV2I64Clamp;
- case Intrinsic::nvvm_suld_1d_array_v4i8_clamp:
- return NVPTXISD::Suld1DArrayV4I8Clamp;
- case Intrinsic::nvvm_suld_1d_array_v4i16_clamp:
- return NVPTXISD::Suld1DArrayV4I16Clamp;
- case Intrinsic::nvvm_suld_1d_array_v4i32_clamp:
- return NVPTXISD::Suld1DArrayV4I32Clamp;
- case Intrinsic::nvvm_suld_2d_i8_clamp:
- return NVPTXISD::Suld2DI8Clamp;
- case Intrinsic::nvvm_suld_2d_i16_clamp:
- return NVPTXISD::Suld2DI16Clamp;
- case Intrinsic::nvvm_suld_2d_i32_clamp:
- return NVPTXISD::Suld2DI32Clamp;
- case Intrinsic::nvvm_suld_2d_i64_clamp:
- return NVPTXISD::Suld2DI64Clamp;
- case Intrinsic::nvvm_suld_2d_v2i8_clamp:
- return NVPTXISD::Suld2DV2I8Clamp;
- case Intrinsic::nvvm_suld_2d_v2i16_clamp:
- return NVPTXISD::Suld2DV2I16Clamp;
- case Intrinsic::nvvm_suld_2d_v2i32_clamp:
- return NVPTXISD::Suld2DV2I32Clamp;
- case Intrinsic::nvvm_suld_2d_v2i64_clamp:
- return NVPTXISD::Suld2DV2I64Clamp;
- case Intrinsic::nvvm_suld_2d_v4i8_clamp:
- return NVPTXISD::Suld2DV4I8Clamp;
- case Intrinsic::nvvm_suld_2d_v4i16_clamp:
- return NVPTXISD::Suld2DV4I16Clamp;
- case Intrinsic::nvvm_suld_2d_v4i32_clamp:
- return NVPTXISD::Suld2DV4I32Clamp;
- case Intrinsic::nvvm_suld_2d_array_i8_clamp:
- return NVPTXISD::Suld2DArrayI8Clamp;
- case Intrinsic::nvvm_suld_2d_array_i16_clamp:
- return NVPTXISD::Suld2DArrayI16Clamp;
- case Intrinsic::nvvm_suld_2d_array_i32_clamp:
- return NVPTXISD::Suld2DArrayI32Clamp;
- case Intrinsic::nvvm_suld_2d_array_i64_clamp:
- return NVPTXISD::Suld2DArrayI64Clamp;
- case Intrinsic::nvvm_suld_2d_array_v2i8_clamp:
- return NVPTXISD::Suld2DArrayV2I8Clamp;
- case Intrinsic::nvvm_suld_2d_array_v2i16_clamp:
- return NVPTXISD::Suld2DArrayV2I16Clamp;
- case Intrinsic::nvvm_suld_2d_array_v2i32_clamp:
- return NVPTXISD::Suld2DArrayV2I32Clamp;
- case Intrinsic::nvvm_suld_2d_array_v2i64_clamp:
- return NVPTXISD::Suld2DArrayV2I64Clamp;
- case Intrinsic::nvvm_suld_2d_array_v4i8_clamp:
- return NVPTXISD::Suld2DArrayV4I8Clamp;
- case Intrinsic::nvvm_suld_2d_array_v4i16_clamp:
- return NVPTXISD::Suld2DArrayV4I16Clamp;
- case Intrinsic::nvvm_suld_2d_array_v4i32_clamp:
- return NVPTXISD::Suld2DArrayV4I32Clamp;
- case Intrinsic::nvvm_suld_3d_i8_clamp:
- return NVPTXISD::Suld3DI8Clamp;
- case Intrinsic::nvvm_suld_3d_i16_clamp:
- return NVPTXISD::Suld3DI16Clamp;
- case Intrinsic::nvvm_suld_3d_i32_clamp:
- return NVPTXISD::Suld3DI32Clamp;
- case Intrinsic::nvvm_suld_3d_i64_clamp:
- return NVPTXISD::Suld3DI64Clamp;
- case Intrinsic::nvvm_suld_3d_v2i8_clamp:
- return NVPTXISD::Suld3DV2I8Clamp;
- case Intrinsic::nvvm_suld_3d_v2i16_clamp:
- return NVPTXISD::Suld3DV2I16Clamp;
- case Intrinsic::nvvm_suld_3d_v2i32_clamp:
- return NVPTXISD::Suld3DV2I32Clamp;
- case Intrinsic::nvvm_suld_3d_v2i64_clamp:
- return NVPTXISD::Suld3DV2I64Clamp;
- case Intrinsic::nvvm_suld_3d_v4i8_clamp:
- return NVPTXISD::Suld3DV4I8Clamp;
- case Intrinsic::nvvm_suld_3d_v4i16_clamp:
- return NVPTXISD::Suld3DV4I16Clamp;
- case Intrinsic::nvvm_suld_3d_v4i32_clamp:
- return NVPTXISD::Suld3DV4I32Clamp;
- case Intrinsic::nvvm_suld_1d_i8_trap:
- return NVPTXISD::Suld1DI8Trap;
- case Intrinsic::nvvm_suld_1d_i16_trap:
- return NVPTXISD::Suld1DI16Trap;
- case Intrinsic::nvvm_suld_1d_i32_trap:
- return NVPTXISD::Suld1DI32Trap;
- case Intrinsic::nvvm_suld_1d_i64_trap:
- return NVPTXISD::Suld1DI64Trap;
- case Intrinsic::nvvm_suld_1d_v2i8_trap:
- return NVPTXISD::Suld1DV2I8Trap;
- case Intrinsic::nvvm_suld_1d_v2i16_trap:
- return NVPTXISD::Suld1DV2I16Trap;
- case Intrinsic::nvvm_suld_1d_v2i32_trap:
- return NVPTXISD::Suld1DV2I32Trap;
- case Intrinsic::nvvm_suld_1d_v2i64_trap:
- return NVPTXISD::Suld1DV2I64Trap;
- case Intrinsic::nvvm_suld_1d_v4i8_trap:
- return NVPTXISD::Suld1DV4I8Trap;
- case Intrinsic::nvvm_suld_1d_v4i16_trap:
- return NVPTXISD::Suld1DV4I16Trap;
- case Intrinsic::nvvm_suld_1d_v4i32_trap:
- return NVPTXISD::Suld1DV4I32Trap;
- case Intrinsic::nvvm_suld_1d_array_i8_trap:
- return NVPTXISD::Suld1DArrayI8Trap;
- case Intrinsic::nvvm_suld_1d_array_i16_trap:
- return NVPTXISD::Suld1DArrayI16Trap;
- case Intrinsic::nvvm_suld_1d_array_i32_trap:
- return NVPTXISD::Suld1DArrayI32Trap;
- case Intrinsic::nvvm_suld_1d_array_i64_trap:
- return NVPTXISD::Suld1DArrayI64Trap;
- case Intrinsic::nvvm_suld_1d_array_v2i8_trap:
- return NVPTXISD::Suld1DArrayV2I8Trap;
- case Intrinsic::nvvm_suld_1d_array_v2i16_trap:
- return NVPTXISD::Suld1DArrayV2I16Trap;
- case Intrinsic::nvvm_suld_1d_array_v2i32_trap:
- return NVPTXISD::Suld1DArrayV2I32Trap;
- case Intrinsic::nvvm_suld_1d_array_v2i64_trap:
- return NVPTXISD::Suld1DArrayV2I64Trap;
- case Intrinsic::nvvm_suld_1d_array_v4i8_trap:
- return NVPTXISD::Suld1DArrayV4I8Trap;
- case Intrinsic::nvvm_suld_1d_array_v4i16_trap:
- return NVPTXISD::Suld1DArrayV4I16Trap;
- case Intrinsic::nvvm_suld_1d_array_v4i32_trap:
- return NVPTXISD::Suld1DArrayV4I32Trap;
- case Intrinsic::nvvm_suld_2d_i8_trap:
- return NVPTXISD::Suld2DI8Trap;
- case Intrinsic::nvvm_suld_2d_i16_trap:
- return NVPTXISD::Suld2DI16Trap;
- case Intrinsic::nvvm_suld_2d_i32_trap:
- return NVPTXISD::Suld2DI32Trap;
- case Intrinsic::nvvm_suld_2d_i64_trap:
- return NVPTXISD::Suld2DI64Trap;
- case Intrinsic::nvvm_suld_2d_v2i8_trap:
- return NVPTXISD::Suld2DV2I8Trap;
- case Intrinsic::nvvm_suld_2d_v2i16_trap:
- return NVPTXISD::Suld2DV2I16Trap;
- case Intrinsic::nvvm_suld_2d_v2i32_trap:
- return NVPTXISD::Suld2DV2I32Trap;
- case Intrinsic::nvvm_suld_2d_v2i64_trap:
- return NVPTXISD::Suld2DV2I64Trap;
- case Intrinsic::nvvm_suld_2d_v4i8_trap:
- return NVPTXISD::Suld2DV4I8Trap;
- case Intrinsic::nvvm_suld_2d_v4i16_trap:
- return NVPTXISD::Suld2DV4I16Trap;
- case Intrinsic::nvvm_suld_2d_v4i32_trap:
- return NVPTXISD::Suld2DV4I32Trap;
- case Intrinsic::nvvm_suld_2d_array_i8_trap:
- return NVPTXISD::Suld2DArrayI8Trap;
- case Intrinsic::nvvm_suld_2d_array_i16_trap:
- return NVPTXISD::Suld2DArrayI16Trap;
- case Intrinsic::nvvm_suld_2d_array_i32_trap:
- return NVPTXISD::Suld2DArrayI32Trap;
- case Intrinsic::nvvm_suld_2d_array_i64_trap:
- return NVPTXISD::Suld2DArrayI64Trap;
- case Intrinsic::nvvm_suld_2d_array_v2i8_trap:
- return NVPTXISD::Suld2DArrayV2I8Trap;
- case Intrinsic::nvvm_suld_2d_array_v2i16_trap:
- return NVPTXISD::Suld2DArrayV2I16Trap;
- case Intrinsic::nvvm_suld_2d_array_v2i32_trap:
- return NVPTXISD::Suld2DArrayV2I32Trap;
- case Intrinsic::nvvm_suld_2d_array_v2i64_trap:
- return NVPTXISD::Suld2DArrayV2I64Trap;
- case Intrinsic::nvvm_suld_2d_array_v4i8_trap:
- return NVPTXISD::Suld2DArrayV4I8Trap;
- case Intrinsic::nvvm_suld_2d_array_v4i16_trap:
- return NVPTXISD::Suld2DArrayV4I16Trap;
- case Intrinsic::nvvm_suld_2d_array_v4i32_trap:
- return NVPTXISD::Suld2DArrayV4I32Trap;
- case Intrinsic::nvvm_suld_3d_i8_trap:
- return NVPTXISD::Suld3DI8Trap;
- case Intrinsic::nvvm_suld_3d_i16_trap:
- return NVPTXISD::Suld3DI16Trap;
- case Intrinsic::nvvm_suld_3d_i32_trap:
- return NVPTXISD::Suld3DI32Trap;
- case Intrinsic::nvvm_suld_3d_i64_trap:
- return NVPTXISD::Suld3DI64Trap;
- case Intrinsic::nvvm_suld_3d_v2i8_trap:
- return NVPTXISD::Suld3DV2I8Trap;
- case Intrinsic::nvvm_suld_3d_v2i16_trap:
- return NVPTXISD::Suld3DV2I16Trap;
- case Intrinsic::nvvm_suld_3d_v2i32_trap:
- return NVPTXISD::Suld3DV2I32Trap;
- case Intrinsic::nvvm_suld_3d_v2i64_trap:
- return NVPTXISD::Suld3DV2I64Trap;
- case Intrinsic::nvvm_suld_3d_v4i8_trap:
- return NVPTXISD::Suld3DV4I8Trap;
- case Intrinsic::nvvm_suld_3d_v4i16_trap:
- return NVPTXISD::Suld3DV4I16Trap;
- case Intrinsic::nvvm_suld_3d_v4i32_trap:
- return NVPTXISD::Suld3DV4I32Trap;
- case Intrinsic::nvvm_suld_1d_i8_zero:
- return NVPTXISD::Suld1DI8Zero;
- case Intrinsic::nvvm_suld_1d_i16_zero:
- return NVPTXISD::Suld1DI16Zero;
- case Intrinsic::nvvm_suld_1d_i32_zero:
- return NVPTXISD::Suld1DI32Zero;
- case Intrinsic::nvvm_suld_1d_i64_zero:
- return NVPTXISD::Suld1DI64Zero;
- case Intrinsic::nvvm_suld_1d_v2i8_zero:
- return NVPTXISD::Suld1DV2I8Zero;
- case Intrinsic::nvvm_suld_1d_v2i16_zero:
- return NVPTXISD::Suld1DV2I16Zero;
- case Intrinsic::nvvm_suld_1d_v2i32_zero:
- return NVPTXISD::Suld1DV2I32Zero;
- case Intrinsic::nvvm_suld_1d_v2i64_zero:
- return NVPTXISD::Suld1DV2I64Zero;
- case Intrinsic::nvvm_suld_1d_v4i8_zero:
- return NVPTXISD::Suld1DV4I8Zero;
- case Intrinsic::nvvm_suld_1d_v4i16_zero:
- return NVPTXISD::Suld1DV4I16Zero;
- case Intrinsic::nvvm_suld_1d_v4i32_zero:
- return NVPTXISD::Suld1DV4I32Zero;
- case Intrinsic::nvvm_suld_1d_array_i8_zero:
- return NVPTXISD::Suld1DArrayI8Zero;
- case Intrinsic::nvvm_suld_1d_array_i16_zero:
- return NVPTXISD::Suld1DArrayI16Zero;
- case Intrinsic::nvvm_suld_1d_array_i32_zero:
- return NVPTXISD::Suld1DArrayI32Zero;
- case Intrinsic::nvvm_suld_1d_array_i64_zero:
- return NVPTXISD::Suld1DArrayI64Zero;
- case Intrinsic::nvvm_suld_1d_array_v2i8_zero:
- return NVPTXISD::Suld1DArrayV2I8Zero;
- case Intrinsic::nvvm_suld_1d_array_v2i16_zero:
- return NVPTXISD::Suld1DArrayV2I16Zero;
- case Intrinsic::nvvm_suld_1d_array_v2i32_zero:
- return NVPTXISD::Suld1DArrayV2I32Zero;
- case Intrinsic::nvvm_suld_1d_array_v2i64_zero:
- return NVPTXISD::Suld1DArrayV2I64Zero;
- case Intrinsic::nvvm_suld_1d_array_v4i8_zero:
- return NVPTXISD::Suld1DArrayV4I8Zero;
- case Intrinsic::nvvm_suld_1d_array_v4i16_zero:
- return NVPTXISD::Suld1DArrayV4I16Zero;
- case Intrinsic::nvvm_suld_1d_array_v4i32_zero:
- return NVPTXISD::Suld1DArrayV4I32Zero;
- case Intrinsic::nvvm_suld_2d_i8_zero:
- return NVPTXISD::Suld2DI8Zero;
- case Intrinsic::nvvm_suld_2d_i16_zero:
- return NVPTXISD::Suld2DI16Zero;
- case Intrinsic::nvvm_suld_2d_i32_zero:
- return NVPTXISD::Suld2DI32Zero;
- case Intrinsic::nvvm_suld_2d_i64_zero:
- return NVPTXISD::Suld2DI64Zero;
- case Intrinsic::nvvm_suld_2d_v2i8_zero:
- return NVPTXISD::Suld2DV2I8Zero;
- case Intrinsic::nvvm_suld_2d_v2i16_zero:
- return NVPTXISD::Suld2DV2I16Zero;
- case Intrinsic::nvvm_suld_2d_v2i32_zero:
- return NVPTXISD::Suld2DV2I32Zero;
- case Intrinsic::nvvm_suld_2d_v2i64_zero:
- return NVPTXISD::Suld2DV2I64Zero;
- case Intrinsic::nvvm_suld_2d_v4i8_zero:
- return NVPTXISD::Suld2DV4I8Zero;
- case Intrinsic::nvvm_suld_2d_v4i16_zero:
- return NVPTXISD::Suld2DV4I16Zero;
- case Intrinsic::nvvm_suld_2d_v4i32_zero:
- return NVPTXISD::Suld2DV4I32Zero;
- case Intrinsic::nvvm_suld_2d_array_i8_zero:
- return NVPTXISD::Suld2DArrayI8Zero;
- case Intrinsic::nvvm_suld_2d_array_i16_zero:
- return NVPTXISD::Suld2DArrayI16Zero;
- case Intrinsic::nvvm_suld_2d_array_i32_zero:
- return NVPTXISD::Suld2DArrayI32Zero;
- case Intrinsic::nvvm_suld_2d_array_i64_zero:
- return NVPTXISD::Suld2DArrayI64Zero;
- case Intrinsic::nvvm_suld_2d_array_v2i8_zero:
- return NVPTXISD::Suld2DArrayV2I8Zero;
- case Intrinsic::nvvm_suld_2d_array_v2i16_zero:
- return NVPTXISD::Suld2DArrayV2I16Zero;
- case Intrinsic::nvvm_suld_2d_array_v2i32_zero:
- return NVPTXISD::Suld2DArrayV2I32Zero;
- case Intrinsic::nvvm_suld_2d_array_v2i64_zero:
- return NVPTXISD::Suld2DArrayV2I64Zero;
- case Intrinsic::nvvm_suld_2d_array_v4i8_zero:
- return NVPTXISD::Suld2DArrayV4I8Zero;
- case Intrinsic::nvvm_suld_2d_array_v4i16_zero:
- return NVPTXISD::Suld2DArrayV4I16Zero;
- case Intrinsic::nvvm_suld_2d_array_v4i32_zero:
- return NVPTXISD::Suld2DArrayV4I32Zero;
- case Intrinsic::nvvm_suld_3d_i8_zero:
- return NVPTXISD::Suld3DI8Zero;
- case Intrinsic::nvvm_suld_3d_i16_zero:
- return NVPTXISD::Suld3DI16Zero;
- case Intrinsic::nvvm_suld_3d_i32_zero:
- return NVPTXISD::Suld3DI32Zero;
- case Intrinsic::nvvm_suld_3d_i64_zero:
- return NVPTXISD::Suld3DI64Zero;
- case Intrinsic::nvvm_suld_3d_v2i8_zero:
- return NVPTXISD::Suld3DV2I8Zero;
- case Intrinsic::nvvm_suld_3d_v2i16_zero:
- return NVPTXISD::Suld3DV2I16Zero;
- case Intrinsic::nvvm_suld_3d_v2i32_zero:
- return NVPTXISD::Suld3DV2I32Zero;
- case Intrinsic::nvvm_suld_3d_v2i64_zero:
- return NVPTXISD::Suld3DV2I64Zero;
- case Intrinsic::nvvm_suld_3d_v4i8_zero:
- return NVPTXISD::Suld3DV4I8Zero;
- case Intrinsic::nvvm_suld_3d_v4i16_zero:
- return NVPTXISD::Suld3DV4I16Zero;
- case Intrinsic::nvvm_suld_3d_v4i32_zero:
- return NVPTXISD::Suld3DV4I32Zero;
- }
-}
-
-// llvm.ptx.memcpy.const and llvm.ptx.memmove.const need to be modeled as
-// TgtMemIntrinsic
-// because we need the information that is only available in the "Value" type
-// of destination
-// pointer. In particular, the address space information.
-bool NVPTXTargetLowering::getTgtMemIntrinsic(
- IntrinsicInfo &Info, const CallInst &I, unsigned Intrinsic) const {
- switch (Intrinsic) {
- default:
- return false;
-
- case Intrinsic::nvvm_atomic_load_add_f32:
- case Intrinsic::nvvm_atomic_load_inc_32:
- case Intrinsic::nvvm_atomic_load_dec_32:
-
- case Intrinsic::nvvm_atomic_add_gen_f_cta:
- case Intrinsic::nvvm_atomic_add_gen_f_sys:
- case Intrinsic::nvvm_atomic_add_gen_i_cta:
- case Intrinsic::nvvm_atomic_add_gen_i_sys:
- case Intrinsic::nvvm_atomic_and_gen_i_cta:
- case Intrinsic::nvvm_atomic_and_gen_i_sys:
- case Intrinsic::nvvm_atomic_cas_gen_i_cta:
- case Intrinsic::nvvm_atomic_cas_gen_i_sys:
- case Intrinsic::nvvm_atomic_dec_gen_i_cta:
- case Intrinsic::nvvm_atomic_dec_gen_i_sys:
- case Intrinsic::nvvm_atomic_inc_gen_i_cta:
- case Intrinsic::nvvm_atomic_inc_gen_i_sys:
- case Intrinsic::nvvm_atomic_max_gen_i_cta:
- case Intrinsic::nvvm_atomic_max_gen_i_sys:
- case Intrinsic::nvvm_atomic_min_gen_i_cta:
- case Intrinsic::nvvm_atomic_min_gen_i_sys:
- case Intrinsic::nvvm_atomic_or_gen_i_cta:
- case Intrinsic::nvvm_atomic_or_gen_i_sys:
- case Intrinsic::nvvm_atomic_exch_gen_i_cta:
- case Intrinsic::nvvm_atomic_exch_gen_i_sys:
- case Intrinsic::nvvm_atomic_xor_gen_i_cta:
- case Intrinsic::nvvm_atomic_xor_gen_i_sys: {
- auto &DL = I.getModule()->getDataLayout();
- Info.opc = ISD::INTRINSIC_W_CHAIN;
- Info.memVT = getValueType(DL, I.getType());
- Info.ptrVal = I.getArgOperand(0);
- Info.offset = 0;
- Info.vol = false;
- Info.readMem = true;
- Info.writeMem = true;
- Info.align = 0;
- return true;
- }
-
- case Intrinsic::nvvm_ldu_global_i:
- case Intrinsic::nvvm_ldu_global_f:
- case Intrinsic::nvvm_ldu_global_p: {
- auto &DL = I.getModule()->getDataLayout();
- Info.opc = ISD::INTRINSIC_W_CHAIN;
- if (Intrinsic == Intrinsic::nvvm_ldu_global_i)
- Info.memVT = getValueType(DL, I.getType());
- else if(Intrinsic == Intrinsic::nvvm_ldu_global_p)
- Info.memVT = getPointerTy(DL);
- else
- Info.memVT = getValueType(DL, I.getType());
- Info.ptrVal = I.getArgOperand(0);
- Info.offset = 0;
- Info.vol = false;
- Info.readMem = true;
- Info.writeMem = false;
- Info.align = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();
-
- return true;
- }
- case Intrinsic::nvvm_ldg_global_i:
- case Intrinsic::nvvm_ldg_global_f:
- case Intrinsic::nvvm_ldg_global_p: {
- auto &DL = I.getModule()->getDataLayout();
-
- Info.opc = ISD::INTRINSIC_W_CHAIN;
- if (Intrinsic == Intrinsic::nvvm_ldg_global_i)
- Info.memVT = getValueType(DL, I.getType());
- else if(Intrinsic == Intrinsic::nvvm_ldg_global_p)
- Info.memVT = getPointerTy(DL);
- else
- Info.memVT = getValueType(DL, I.getType());
- Info.ptrVal = I.getArgOperand(0);
- Info.offset = 0;
- Info.vol = false;
- Info.readMem = true;
- Info.writeMem = false;
- Info.align = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();
-
- return true;
- }
-
- case Intrinsic::nvvm_tex_1d_v4f32_s32:
- case Intrinsic::nvvm_tex_1d_v4f32_f32:
- case Intrinsic::nvvm_tex_1d_level_v4f32_f32:
- case Intrinsic::nvvm_tex_1d_grad_v4f32_f32:
- case Intrinsic::nvvm_tex_1d_array_v4f32_s32:
- case Intrinsic::nvvm_tex_1d_array_v4f32_f32:
- case Intrinsic::nvvm_tex_1d_array_level_v4f32_f32:
- case Intrinsic::nvvm_tex_1d_array_grad_v4f32_f32:
- case Intrinsic::nvvm_tex_2d_v4f32_s32:
- case Intrinsic::nvvm_tex_2d_v4f32_f32:
- case Intrinsic::nvvm_tex_2d_level_v4f32_f32:
- case Intrinsic::nvvm_tex_2d_grad_v4f32_f32:
- case Intrinsic::nvvm_tex_2d_array_v4f32_s32:
- case Intrinsic::nvvm_tex_2d_array_v4f32_f32:
- case Intrinsic::nvvm_tex_2d_array_level_v4f32_f32:
- case Intrinsic::nvvm_tex_2d_array_grad_v4f32_f32:
- case Intrinsic::nvvm_tex_3d_v4f32_s32:
- case Intrinsic::nvvm_tex_3d_v4f32_f32:
- case Intrinsic::nvvm_tex_3d_level_v4f32_f32:
- case Intrinsic::nvvm_tex_3d_grad_v4f32_f32:
- case Intrinsic::nvvm_tex_cube_v4f32_f32:
- case Intrinsic::nvvm_tex_cube_level_v4f32_f32:
- case Intrinsic::nvvm_tex_cube_array_v4f32_f32:
- case Intrinsic::nvvm_tex_cube_array_level_v4f32_f32:
- case Intrinsic::nvvm_tld4_r_2d_v4f32_f32:
- case Intrinsic::nvvm_tld4_g_2d_v4f32_f32:
- case Intrinsic::nvvm_tld4_b_2d_v4f32_f32:
- case Intrinsic::nvvm_tld4_a_2d_v4f32_f32:
- case Intrinsic::nvvm_tex_unified_1d_v4f32_s32:
- case Intrinsic::nvvm_tex_unified_1d_v4f32_f32:
- case Intrinsic::nvvm_tex_unified_1d_level_v4f32_f32:
- case Intrinsic::nvvm_tex_unified_1d_grad_v4f32_f32:
- case Intrinsic::nvvm_tex_unified_1d_array_v4f32_s32:
- case Intrinsic::nvvm_tex_unified_1d_array_v4f32_f32:
- case Intrinsic::nvvm_tex_unified_1d_array_level_v4f32_f32:
- case Intrinsic::nvvm_tex_unified_1d_array_grad_v4f32_f32:
- case Intrinsic::nvvm_tex_unified_2d_v4f32_s32:
- case Intrinsic::nvvm_tex_unified_2d_v4f32_f32:
- case Intrinsic::nvvm_tex_unified_2d_level_v4f32_f32:
- case Intrinsic::nvvm_tex_unified_2d_grad_v4f32_f32:
- case Intrinsic::nvvm_tex_unified_2d_array_v4f32_s32:
- case Intrinsic::nvvm_tex_unified_2d_array_v4f32_f32:
- case Intrinsic::nvvm_tex_unified_2d_array_level_v4f32_f32:
- case Intrinsic::nvvm_tex_unified_2d_array_grad_v4f32_f32:
- case Intrinsic::nvvm_tex_unified_3d_v4f32_s32:
- case Intrinsic::nvvm_tex_unified_3d_v4f32_f32:
- case Intrinsic::nvvm_tex_unified_3d_level_v4f32_f32:
- case Intrinsic::nvvm_tex_unified_3d_grad_v4f32_f32:
- case Intrinsic::nvvm_tex_unified_cube_v4f32_f32:
- case Intrinsic::nvvm_tex_unified_cube_level_v4f32_f32:
- case Intrinsic::nvvm_tex_unified_cube_array_v4f32_f32:
- case Intrinsic::nvvm_tex_unified_cube_array_level_v4f32_f32:
- case Intrinsic::nvvm_tld4_unified_r_2d_v4f32_f32:
- case Intrinsic::nvvm_tld4_unified_g_2d_v4f32_f32:
- case Intrinsic::nvvm_tld4_unified_b_2d_v4f32_f32:
- case Intrinsic::nvvm_tld4_unified_a_2d_v4f32_f32:
- Info.opc = getOpcForTextureInstr(Intrinsic);
- Info.memVT = MVT::v4f32;
- Info.ptrVal = nullptr;
- Info.offset = 0;
- Info.vol = false;
- Info.readMem = true;
- Info.writeMem = false;
- Info.align = 16;
- return true;
-
- case Intrinsic::nvvm_tex_1d_v4s32_s32:
- case Intrinsic::nvvm_tex_1d_v4s32_f32:
- case Intrinsic::nvvm_tex_1d_level_v4s32_f32:
- case Intrinsic::nvvm_tex_1d_grad_v4s32_f32:
- case Intrinsic::nvvm_tex_1d_array_v4s32_s32:
- case Intrinsic::nvvm_tex_1d_array_v4s32_f32:
- case Intrinsic::nvvm_tex_1d_array_level_v4s32_f32:
- case Intrinsic::nvvm_tex_1d_array_grad_v4s32_f32:
- case Intrinsic::nvvm_tex_2d_v4s32_s32:
- case Intrinsic::nvvm_tex_2d_v4s32_f32:
- case Intrinsic::nvvm_tex_2d_level_v4s32_f32:
- case Intrinsic::nvvm_tex_2d_grad_v4s32_f32:
- case Intrinsic::nvvm_tex_2d_array_v4s32_s32:
- case Intrinsic::nvvm_tex_2d_array_v4s32_f32:
- case Intrinsic::nvvm_tex_2d_array_level_v4s32_f32:
- case Intrinsic::nvvm_tex_2d_array_grad_v4s32_f32:
- case Intrinsic::nvvm_tex_3d_v4s32_s32:
- case Intrinsic::nvvm_tex_3d_v4s32_f32:
- case Intrinsic::nvvm_tex_3d_level_v4s32_f32:
- case Intrinsic::nvvm_tex_3d_grad_v4s32_f32:
- case Intrinsic::nvvm_tex_cube_v4s32_f32:
- case Intrinsic::nvvm_tex_cube_level_v4s32_f32:
- case Intrinsic::nvvm_tex_cube_array_v4s32_f32:
- case Intrinsic::nvvm_tex_cube_array_level_v4s32_f32:
- case Intrinsic::nvvm_tex_cube_v4u32_f32:
- case Intrinsic::nvvm_tex_cube_level_v4u32_f32:
- case Intrinsic::nvvm_tex_cube_array_v4u32_f32:
- case Intrinsic::nvvm_tex_cube_array_level_v4u32_f32:
- case Intrinsic::nvvm_tex_1d_v4u32_s32:
- case Intrinsic::nvvm_tex_1d_v4u32_f32:
- case Intrinsic::nvvm_tex_1d_level_v4u32_f32:
- case Intrinsic::nvvm_tex_1d_grad_v4u32_f32:
- case Intrinsic::nvvm_tex_1d_array_v4u32_s32:
- case Intrinsic::nvvm_tex_1d_array_v4u32_f32:
- case Intrinsic::nvvm_tex_1d_array_level_v4u32_f32:
- case Intrinsic::nvvm_tex_1d_array_grad_v4u32_f32:
- case Intrinsic::nvvm_tex_2d_v4u32_s32:
- case Intrinsic::nvvm_tex_2d_v4u32_f32:
- case Intrinsic::nvvm_tex_2d_level_v4u32_f32:
- case Intrinsic::nvvm_tex_2d_grad_v4u32_f32:
- case Intrinsic::nvvm_tex_2d_array_v4u32_s32:
- case Intrinsic::nvvm_tex_2d_array_v4u32_f32:
- case Intrinsic::nvvm_tex_2d_array_level_v4u32_f32:
- case Intrinsic::nvvm_tex_2d_array_grad_v4u32_f32:
- case Intrinsic::nvvm_tex_3d_v4u32_s32:
- case Intrinsic::nvvm_tex_3d_v4u32_f32:
- case Intrinsic::nvvm_tex_3d_level_v4u32_f32:
- case Intrinsic::nvvm_tex_3d_grad_v4u32_f32:
- case Intrinsic::nvvm_tld4_r_2d_v4s32_f32:
- case Intrinsic::nvvm_tld4_g_2d_v4s32_f32:
- case Intrinsic::nvvm_tld4_b_2d_v4s32_f32:
- case Intrinsic::nvvm_tld4_a_2d_v4s32_f32:
- case Intrinsic::nvvm_tld4_r_2d_v4u32_f32:
- case Intrinsic::nvvm_tld4_g_2d_v4u32_f32:
- case Intrinsic::nvvm_tld4_b_2d_v4u32_f32:
- case Intrinsic::nvvm_tld4_a_2d_v4u32_f32:
- case Intrinsic::nvvm_tex_unified_1d_v4s32_s32:
- case Intrinsic::nvvm_tex_unified_1d_v4s32_f32:
- case Intrinsic::nvvm_tex_unified_1d_level_v4s32_f32:
- case Intrinsic::nvvm_tex_unified_1d_grad_v4s32_f32:
- case Intrinsic::nvvm_tex_unified_1d_array_v4s32_s32:
- case Intrinsic::nvvm_tex_unified_1d_array_v4s32_f32:
- case Intrinsic::nvvm_tex_unified_1d_array_level_v4s32_f32:
- case Intrinsic::nvvm_tex_unified_1d_array_grad_v4s32_f32:
- case Intrinsic::nvvm_tex_unified_2d_v4s32_s32:
- case Intrinsic::nvvm_tex_unified_2d_v4s32_f32:
- case Intrinsic::nvvm_tex_unified_2d_level_v4s32_f32:
- case Intrinsic::nvvm_tex_unified_2d_grad_v4s32_f32:
- case Intrinsic::nvvm_tex_unified_2d_array_v4s32_s32:
- case Intrinsic::nvvm_tex_unified_2d_array_v4s32_f32:
- case Intrinsic::nvvm_tex_unified_2d_array_level_v4s32_f32:
- case Intrinsic::nvvm_tex_unified_2d_array_grad_v4s32_f32:
- case Intrinsic::nvvm_tex_unified_3d_v4s32_s32:
- case Intrinsic::nvvm_tex_unified_3d_v4s32_f32:
- case Intrinsic::nvvm_tex_unified_3d_level_v4s32_f32:
- case Intrinsic::nvvm_tex_unified_3d_grad_v4s32_f32:
- case Intrinsic::nvvm_tex_unified_1d_v4u32_s32:
- case Intrinsic::nvvm_tex_unified_1d_v4u32_f32:
- case Intrinsic::nvvm_tex_unified_1d_level_v4u32_f32:
- case Intrinsic::nvvm_tex_unified_1d_grad_v4u32_f32:
- case Intrinsic::nvvm_tex_unified_1d_array_v4u32_s32:
- case Intrinsic::nvvm_tex_unified_1d_array_v4u32_f32:
- case Intrinsic::nvvm_tex_unified_1d_array_level_v4u32_f32:
- case Intrinsic::nvvm_tex_unified_1d_array_grad_v4u32_f32:
- case Intrinsic::nvvm_tex_unified_2d_v4u32_s32:
- case Intrinsic::nvvm_tex_unified_2d_v4u32_f32:
- case Intrinsic::nvvm_tex_unified_2d_level_v4u32_f32:
- case Intrinsic::nvvm_tex_unified_2d_grad_v4u32_f32:
- case Intrinsic::nvvm_tex_unified_2d_array_v4u32_s32:
- case Intrinsic::nvvm_tex_unified_2d_array_v4u32_f32:
- case Intrinsic::nvvm_tex_unified_2d_array_level_v4u32_f32:
- case Intrinsic::nvvm_tex_unified_2d_array_grad_v4u32_f32:
- case Intrinsic::nvvm_tex_unified_3d_v4u32_s32:
- case Intrinsic::nvvm_tex_unified_3d_v4u32_f32:
- case Intrinsic::nvvm_tex_unified_3d_level_v4u32_f32:
- case Intrinsic::nvvm_tex_unified_3d_grad_v4u32_f32:
- case Intrinsic::nvvm_tex_unified_cube_v4s32_f32:
- case Intrinsic::nvvm_tex_unified_cube_level_v4s32_f32:
- case Intrinsic::nvvm_tex_unified_cube_array_v4s32_f32:
- case Intrinsic::nvvm_tex_unified_cube_array_level_v4s32_f32:
- case Intrinsic::nvvm_tex_unified_cube_v4u32_f32:
- case Intrinsic::nvvm_tex_unified_cube_level_v4u32_f32:
- case Intrinsic::nvvm_tex_unified_cube_array_v4u32_f32:
- case Intrinsic::nvvm_tex_unified_cube_array_level_v4u32_f32:
- case Intrinsic::nvvm_tld4_unified_r_2d_v4s32_f32:
- case Intrinsic::nvvm_tld4_unified_g_2d_v4s32_f32:
- case Intrinsic::nvvm_tld4_unified_b_2d_v4s32_f32:
- case Intrinsic::nvvm_tld4_unified_a_2d_v4s32_f32:
- case Intrinsic::nvvm_tld4_unified_r_2d_v4u32_f32:
- case Intrinsic::nvvm_tld4_unified_g_2d_v4u32_f32:
- case Intrinsic::nvvm_tld4_unified_b_2d_v4u32_f32:
- case Intrinsic::nvvm_tld4_unified_a_2d_v4u32_f32:
- Info.opc = getOpcForTextureInstr(Intrinsic);
- Info.memVT = MVT::v4i32;
- Info.ptrVal = nullptr;
- Info.offset = 0;
- Info.vol = false;
- Info.readMem = true;
- Info.writeMem = false;
- Info.align = 16;
- return true;
-
- case Intrinsic::nvvm_suld_1d_i8_clamp:
- case Intrinsic::nvvm_suld_1d_v2i8_clamp:
- case Intrinsic::nvvm_suld_1d_v4i8_clamp:
- case Intrinsic::nvvm_suld_1d_array_i8_clamp:
- case Intrinsic::nvvm_suld_1d_array_v2i8_clamp:
- case Intrinsic::nvvm_suld_1d_array_v4i8_clamp:
- case Intrinsic::nvvm_suld_2d_i8_clamp:
- case Intrinsic::nvvm_suld_2d_v2i8_clamp:
- case Intrinsic::nvvm_suld_2d_v4i8_clamp:
- case Intrinsic::nvvm_suld_2d_array_i8_clamp:
- case Intrinsic::nvvm_suld_2d_array_v2i8_clamp:
- case Intrinsic::nvvm_suld_2d_array_v4i8_clamp:
- case Intrinsic::nvvm_suld_3d_i8_clamp:
- case Intrinsic::nvvm_suld_3d_v2i8_clamp:
- case Intrinsic::nvvm_suld_3d_v4i8_clamp:
- case Intrinsic::nvvm_suld_1d_i8_trap:
- case Intrinsic::nvvm_suld_1d_v2i8_trap:
- case Intrinsic::nvvm_suld_1d_v4i8_trap:
- case Intrinsic::nvvm_suld_1d_array_i8_trap:
- case Intrinsic::nvvm_suld_1d_array_v2i8_trap:
- case Intrinsic::nvvm_suld_1d_array_v4i8_trap:
- case Intrinsic::nvvm_suld_2d_i8_trap:
- case Intrinsic::nvvm_suld_2d_v2i8_trap:
- case Intrinsic::nvvm_suld_2d_v4i8_trap:
- case Intrinsic::nvvm_suld_2d_array_i8_trap:
- case Intrinsic::nvvm_suld_2d_array_v2i8_trap:
- case Intrinsic::nvvm_suld_2d_array_v4i8_trap:
- case Intrinsic::nvvm_suld_3d_i8_trap:
- case Intrinsic::nvvm_suld_3d_v2i8_trap:
- case Intrinsic::nvvm_suld_3d_v4i8_trap:
- case Intrinsic::nvvm_suld_1d_i8_zero:
- case Intrinsic::nvvm_suld_1d_v2i8_zero:
- case Intrinsic::nvvm_suld_1d_v4i8_zero:
- case Intrinsic::nvvm_suld_1d_array_i8_zero:
- case Intrinsic::nvvm_suld_1d_array_v2i8_zero:
- case Intrinsic::nvvm_suld_1d_array_v4i8_zero:
- case Intrinsic::nvvm_suld_2d_i8_zero:
- case Intrinsic::nvvm_suld_2d_v2i8_zero:
- case Intrinsic::nvvm_suld_2d_v4i8_zero:
- case Intrinsic::nvvm_suld_2d_array_i8_zero:
- case Intrinsic::nvvm_suld_2d_array_v2i8_zero:
- case Intrinsic::nvvm_suld_2d_array_v4i8_zero:
- case Intrinsic::nvvm_suld_3d_i8_zero:
- case Intrinsic::nvvm_suld_3d_v2i8_zero:
- case Intrinsic::nvvm_suld_3d_v4i8_zero:
- Info.opc = getOpcForSurfaceInstr(Intrinsic);
- Info.memVT = MVT::i8;
- Info.ptrVal = nullptr;
- Info.offset = 0;
- Info.vol = false;
- Info.readMem = true;
- Info.writeMem = false;
- Info.align = 16;
- return true;
-
- case Intrinsic::nvvm_suld_1d_i16_clamp:
- case Intrinsic::nvvm_suld_1d_v2i16_clamp:
- case Intrinsic::nvvm_suld_1d_v4i16_clamp:
- case Intrinsic::nvvm_suld_1d_array_i16_clamp:
- case Intrinsic::nvvm_suld_1d_array_v2i16_clamp:
- case Intrinsic::nvvm_suld_1d_array_v4i16_clamp:
- case Intrinsic::nvvm_suld_2d_i16_clamp:
- case Intrinsic::nvvm_suld_2d_v2i16_clamp:
- case Intrinsic::nvvm_suld_2d_v4i16_clamp:
- case Intrinsic::nvvm_suld_2d_array_i16_clamp:
- case Intrinsic::nvvm_suld_2d_array_v2i16_clamp:
- case Intrinsic::nvvm_suld_2d_array_v4i16_clamp:
- case Intrinsic::nvvm_suld_3d_i16_clamp:
- case Intrinsic::nvvm_suld_3d_v2i16_clamp:
- case Intrinsic::nvvm_suld_3d_v4i16_clamp:
- case Intrinsic::nvvm_suld_1d_i16_trap:
- case Intrinsic::nvvm_suld_1d_v2i16_trap:
- case Intrinsic::nvvm_suld_1d_v4i16_trap:
- case Intrinsic::nvvm_suld_1d_array_i16_trap:
- case Intrinsic::nvvm_suld_1d_array_v2i16_trap:
- case Intrinsic::nvvm_suld_1d_array_v4i16_trap:
- case Intrinsic::nvvm_suld_2d_i16_trap:
- case Intrinsic::nvvm_suld_2d_v2i16_trap:
- case Intrinsic::nvvm_suld_2d_v4i16_trap:
- case Intrinsic::nvvm_suld_2d_array_i16_trap:
- case Intrinsic::nvvm_suld_2d_array_v2i16_trap:
- case Intrinsic::nvvm_suld_2d_array_v4i16_trap:
- case Intrinsic::nvvm_suld_3d_i16_trap:
- case Intrinsic::nvvm_suld_3d_v2i16_trap:
- case Intrinsic::nvvm_suld_3d_v4i16_trap:
- case Intrinsic::nvvm_suld_1d_i16_zero:
- case Intrinsic::nvvm_suld_1d_v2i16_zero:
- case Intrinsic::nvvm_suld_1d_v4i16_zero:
- case Intrinsic::nvvm_suld_1d_array_i16_zero:
- case Intrinsic::nvvm_suld_1d_array_v2i16_zero:
- case Intrinsic::nvvm_suld_1d_array_v4i16_zero:
- case Intrinsic::nvvm_suld_2d_i16_zero:
- case Intrinsic::nvvm_suld_2d_v2i16_zero:
- case Intrinsic::nvvm_suld_2d_v4i16_zero:
- case Intrinsic::nvvm_suld_2d_array_i16_zero:
- case Intrinsic::nvvm_suld_2d_array_v2i16_zero:
- case Intrinsic::nvvm_suld_2d_array_v4i16_zero:
- case Intrinsic::nvvm_suld_3d_i16_zero:
- case Intrinsic::nvvm_suld_3d_v2i16_zero:
- case Intrinsic::nvvm_suld_3d_v4i16_zero:
- Info.opc = getOpcForSurfaceInstr(Intrinsic);
- Info.memVT = MVT::i16;
- Info.ptrVal = nullptr;
- Info.offset = 0;
- Info.vol = false;
- Info.readMem = true;
- Info.writeMem = false;
- Info.align = 16;
- return true;
-
- case Intrinsic::nvvm_suld_1d_i32_clamp:
- case Intrinsic::nvvm_suld_1d_v2i32_clamp:
- case Intrinsic::nvvm_suld_1d_v4i32_clamp:
- case Intrinsic::nvvm_suld_1d_array_i32_clamp:
- case Intrinsic::nvvm_suld_1d_array_v2i32_clamp:
- case Intrinsic::nvvm_suld_1d_array_v4i32_clamp:
- case Intrinsic::nvvm_suld_2d_i32_clamp:
- case Intrinsic::nvvm_suld_2d_v2i32_clamp:
- case Intrinsic::nvvm_suld_2d_v4i32_clamp:
- case Intrinsic::nvvm_suld_2d_array_i32_clamp:
- case Intrinsic::nvvm_suld_2d_array_v2i32_clamp:
- case Intrinsic::nvvm_suld_2d_array_v4i32_clamp:
- case Intrinsic::nvvm_suld_3d_i32_clamp:
- case Intrinsic::nvvm_suld_3d_v2i32_clamp:
- case Intrinsic::nvvm_suld_3d_v4i32_clamp:
- case Intrinsic::nvvm_suld_1d_i32_trap:
- case Intrinsic::nvvm_suld_1d_v2i32_trap:
- case Intrinsic::nvvm_suld_1d_v4i32_trap:
- case Intrinsic::nvvm_suld_1d_array_i32_trap:
- case Intrinsic::nvvm_suld_1d_array_v2i32_trap:
- case Intrinsic::nvvm_suld_1d_array_v4i32_trap:
- case Intrinsic::nvvm_suld_2d_i32_trap:
- case Intrinsic::nvvm_suld_2d_v2i32_trap:
- case Intrinsic::nvvm_suld_2d_v4i32_trap:
- case Intrinsic::nvvm_suld_2d_array_i32_trap:
- case Intrinsic::nvvm_suld_2d_array_v2i32_trap:
- case Intrinsic::nvvm_suld_2d_array_v4i32_trap:
- case Intrinsic::nvvm_suld_3d_i32_trap:
- case Intrinsic::nvvm_suld_3d_v2i32_trap:
- case Intrinsic::nvvm_suld_3d_v4i32_trap:
- case Intrinsic::nvvm_suld_1d_i32_zero:
- case Intrinsic::nvvm_suld_1d_v2i32_zero:
- case Intrinsic::nvvm_suld_1d_v4i32_zero:
- case Intrinsic::nvvm_suld_1d_array_i32_zero:
- case Intrinsic::nvvm_suld_1d_array_v2i32_zero:
- case Intrinsic::nvvm_suld_1d_array_v4i32_zero:
- case Intrinsic::nvvm_suld_2d_i32_zero:
- case Intrinsic::nvvm_suld_2d_v2i32_zero:
- case Intrinsic::nvvm_suld_2d_v4i32_zero:
- case Intrinsic::nvvm_suld_2d_array_i32_zero:
- case Intrinsic::nvvm_suld_2d_array_v2i32_zero:
- case Intrinsic::nvvm_suld_2d_array_v4i32_zero:
- case Intrinsic::nvvm_suld_3d_i32_zero:
- case Intrinsic::nvvm_suld_3d_v2i32_zero:
- case Intrinsic::nvvm_suld_3d_v4i32_zero:
- Info.opc = getOpcForSurfaceInstr(Intrinsic);
- Info.memVT = MVT::i32;
- Info.ptrVal = nullptr;
- Info.offset = 0;
- Info.vol = false;
- Info.readMem = true;
- Info.writeMem = false;
- Info.align = 16;
- return true;
-
- case Intrinsic::nvvm_suld_1d_i64_clamp:
- case Intrinsic::nvvm_suld_1d_v2i64_clamp:
- case Intrinsic::nvvm_suld_1d_array_i64_clamp:
- case Intrinsic::nvvm_suld_1d_array_v2i64_clamp:
- case Intrinsic::nvvm_suld_2d_i64_clamp:
- case Intrinsic::nvvm_suld_2d_v2i64_clamp:
- case Intrinsic::nvvm_suld_2d_array_i64_clamp:
- case Intrinsic::nvvm_suld_2d_array_v2i64_clamp:
- case Intrinsic::nvvm_suld_3d_i64_clamp:
- case Intrinsic::nvvm_suld_3d_v2i64_clamp:
- case Intrinsic::nvvm_suld_1d_i64_trap:
- case Intrinsic::nvvm_suld_1d_v2i64_trap:
- case Intrinsic::nvvm_suld_1d_array_i64_trap:
- case Intrinsic::nvvm_suld_1d_array_v2i64_trap:
- case Intrinsic::nvvm_suld_2d_i64_trap:
- case Intrinsic::nvvm_suld_2d_v2i64_trap:
- case Intrinsic::nvvm_suld_2d_array_i64_trap:
- case Intrinsic::nvvm_suld_2d_array_v2i64_trap:
- case Intrinsic::nvvm_suld_3d_i64_trap:
- case Intrinsic::nvvm_suld_3d_v2i64_trap:
- case Intrinsic::nvvm_suld_1d_i64_zero:
- case Intrinsic::nvvm_suld_1d_v2i64_zero:
- case Intrinsic::nvvm_suld_1d_array_i64_zero:
- case Intrinsic::nvvm_suld_1d_array_v2i64_zero:
- case Intrinsic::nvvm_suld_2d_i64_zero:
- case Intrinsic::nvvm_suld_2d_v2i64_zero:
- case Intrinsic::nvvm_suld_2d_array_i64_zero:
- case Intrinsic::nvvm_suld_2d_array_v2i64_zero:
- case Intrinsic::nvvm_suld_3d_i64_zero:
- case Intrinsic::nvvm_suld_3d_v2i64_zero:
- Info.opc = getOpcForSurfaceInstr(Intrinsic);
- Info.memVT = MVT::i64;
- Info.ptrVal = nullptr;
- Info.offset = 0;
- Info.vol = false;
- Info.readMem = true;
- Info.writeMem = false;
- Info.align = 16;
- return true;
- }
- return false;
-}
-
-/// isLegalAddressingMode - Return true if the addressing mode represented
-/// by AM is legal for this target, for a load/store of the specified type.
-/// Used to guide target specific optimizations, like loop strength reduction
-/// (LoopStrengthReduce.cpp) and memory optimization for address mode
-/// (CodeGenPrepare.cpp)
-bool NVPTXTargetLowering::isLegalAddressingMode(const DataLayout &DL,
- const AddrMode &AM, Type *Ty,
- unsigned AS) const {
- // AddrMode - This represents an addressing mode of:
- // BaseGV + BaseOffs + BaseReg + Scale*ScaleReg
- //
- // The legal address modes are
- // - [avar]
- // - [areg]
- // - [areg+immoff]
- // - [immAddr]
-
- if (AM.BaseGV) {
- return !AM.BaseOffs && !AM.HasBaseReg && !AM.Scale;
- }
-
- switch (AM.Scale) {
- case 0: // "r", "r+i" or "i" is allowed
- break;
- case 1:
- if (AM.HasBaseReg) // "r+r+i" or "r+r" is not allowed.
- return false;
- // Otherwise we have r+i.
- break;
- default:
- // No scale > 1 is allowed
- return false;
- }
- return true;
-}
-
-//===----------------------------------------------------------------------===//
-// NVPTX Inline Assembly Support
-//===----------------------------------------------------------------------===//
-
-/// getConstraintType - Given a constraint letter, return the type of
-/// constraint it is for this target.
-NVPTXTargetLowering::ConstraintType
-NVPTXTargetLowering::getConstraintType(StringRef Constraint) const {
- if (Constraint.size() == 1) {
- switch (Constraint[0]) {
- default:
- break;
- case 'b':
- case 'r':
- case 'h':
- case 'c':
- case 'l':
- case 'f':
- case 'd':
- case '0':
- case 'N':
- return C_RegisterClass;
- }
- }
- return TargetLowering::getConstraintType(Constraint);
-}
-
-std::pair<unsigned, const TargetRegisterClass *>
-NVPTXTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
- StringRef Constraint,
- MVT VT) const {
- if (Constraint.size() == 1) {
- switch (Constraint[0]) {
- case 'b':
- return std::make_pair(0U, &NVPTX::Int1RegsRegClass);
- case 'c':
- return std::make_pair(0U, &NVPTX::Int16RegsRegClass);
- case 'h':
- return std::make_pair(0U, &NVPTX::Int16RegsRegClass);
- case 'r':
- return std::make_pair(0U, &NVPTX::Int32RegsRegClass);
- case 'l':
- case 'N':
- return std::make_pair(0U, &NVPTX::Int64RegsRegClass);
- case 'f':
- return std::make_pair(0U, &NVPTX::Float32RegsRegClass);
- case 'd':
- return std::make_pair(0U, &NVPTX::Float64RegsRegClass);
- }
- }
- return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
-}
-
-//===----------------------------------------------------------------------===//
-// NVPTX DAG Combining
-//===----------------------------------------------------------------------===//
-
-bool NVPTXTargetLowering::allowFMA(MachineFunction &MF,
- CodeGenOpt::Level OptLevel) const {
- // Always honor command-line argument
- if (FMAContractLevelOpt.getNumOccurrences() > 0)
- return FMAContractLevelOpt > 0;
-
- // Do not contract if we're not optimizing the code.
- if (OptLevel == 0)
- return false;
-
- // Honor TargetOptions flags that explicitly say fusion is okay.
- if (MF.getTarget().Options.AllowFPOpFusion == FPOpFusion::Fast)
- return true;
-
- return allowUnsafeFPMath(MF);
-}
-
-bool NVPTXTargetLowering::allowUnsafeFPMath(MachineFunction &MF) const {
- // Honor TargetOptions flags that explicitly say unsafe math is okay.
- if (MF.getTarget().Options.UnsafeFPMath)
- return true;
-
- // Allow unsafe math if unsafe-fp-math attribute explicitly says so.
- const Function *F = MF.getFunction();
- if (F->hasFnAttribute("unsafe-fp-math")) {
- Attribute Attr = F->getFnAttribute("unsafe-fp-math");
- StringRef Val = Attr.getValueAsString();
- if (Val == "true")
- return true;
- }
-
- return false;
-}
-
-/// PerformADDCombineWithOperands - Try DAG combinations for an ADD with
-/// operands N0 and N1. This is a helper for PerformADDCombine that is
-/// called with the default operands, and if that fails, with commuted
-/// operands.
-static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1,
- TargetLowering::DAGCombinerInfo &DCI,
- const NVPTXSubtarget &Subtarget,
- CodeGenOpt::Level OptLevel) {
- SelectionDAG &DAG = DCI.DAG;
- // Skip non-integer, non-scalar case
- EVT VT=N0.getValueType();
- if (VT.isVector())
- return SDValue();
-
- // fold (add (mul a, b), c) -> (mad a, b, c)
- //
- if (N0.getOpcode() == ISD::MUL) {
- assert (VT.isInteger());
- // For integer:
- // Since integer multiply-add costs the same as integer multiply
- // but is more costly than integer add, do the fusion only when
- // the mul is only used in the add.
- if (OptLevel==CodeGenOpt::None || VT != MVT::i32 ||
- !N0.getNode()->hasOneUse())
- return SDValue();
-
- // Do the folding
- return DAG.getNode(NVPTXISD::IMAD, SDLoc(N), VT,
- N0.getOperand(0), N0.getOperand(1), N1);
- }
- else if (N0.getOpcode() == ISD::FMUL) {
- if (VT == MVT::f32 || VT == MVT::f64) {
- const auto *TLI = static_cast<const NVPTXTargetLowering *>(
- &DAG.getTargetLoweringInfo());
- if (!TLI->allowFMA(DAG.getMachineFunction(), OptLevel))
- return SDValue();
-
- // For floating point:
- // Do the fusion only when the mul has less than 5 uses and all
- // are add.
- // The heuristic is that if a use is not an add, then that use
- // cannot be fused into fma, therefore mul is still needed anyway.
- // If there are more than 4 uses, even if they are all add, fusing
- // them will increase register pressue.
- //
- int numUses = 0;
- int nonAddCount = 0;
- for (SDNode::use_iterator UI = N0.getNode()->use_begin(),
- UE = N0.getNode()->use_end();
- UI != UE; ++UI) {
- numUses++;
- SDNode *User = *UI;
- if (User->getOpcode() != ISD::FADD)
- ++nonAddCount;
- }
- if (numUses >= 5)
- return SDValue();
- if (nonAddCount) {
- int orderNo = N->getIROrder();
- int orderNo2 = N0.getNode()->getIROrder();
- // simple heuristics here for considering potential register
- // pressure, the logics here is that the differnce are used
- // to measure the distance between def and use, the longer distance
- // more likely cause register pressure.
- if (orderNo - orderNo2 < 500)
- return SDValue();
-
- // Now, check if at least one of the FMUL's operands is live beyond the node N,
- // which guarantees that the FMA will not increase register pressure at node N.
- bool opIsLive = false;
- const SDNode *left = N0.getOperand(0).getNode();
- const SDNode *right = N0.getOperand(1).getNode();
-
- if (isa<ConstantSDNode>(left) || isa<ConstantSDNode>(right))
- opIsLive = true;
-
- if (!opIsLive)
- for (SDNode::use_iterator UI = left->use_begin(), UE = left->use_end(); UI != UE; ++UI) {
- SDNode *User = *UI;
- int orderNo3 = User->getIROrder();
- if (orderNo3 > orderNo) {
- opIsLive = true;
- break;
- }
- }
-
- if (!opIsLive)
- for (SDNode::use_iterator UI = right->use_begin(), UE = right->use_end(); UI != UE; ++UI) {
- SDNode *User = *UI;
- int orderNo3 = User->getIROrder();
- if (orderNo3 > orderNo) {
- opIsLive = true;
- break;
- }
- }
-
- if (!opIsLive)
- return SDValue();
- }
-
- return DAG.getNode(ISD::FMA, SDLoc(N), VT,
- N0.getOperand(0), N0.getOperand(1), N1);
- }
- }
-
- return SDValue();
-}
-
-/// PerformADDCombine - Target-specific dag combine xforms for ISD::ADD.
-///
-static SDValue PerformADDCombine(SDNode *N,
- TargetLowering::DAGCombinerInfo &DCI,
- const NVPTXSubtarget &Subtarget,
- CodeGenOpt::Level OptLevel) {
- SDValue N0 = N->getOperand(0);
- SDValue N1 = N->getOperand(1);
-
- // First try with the default operand order.
- if (SDValue Result =
- PerformADDCombineWithOperands(N, N0, N1, DCI, Subtarget, OptLevel))
- return Result;
-
- // If that didn't work, try again with the operands commuted.
- return PerformADDCombineWithOperands(N, N1, N0, DCI, Subtarget, OptLevel);
-}
-
-static SDValue PerformANDCombine(SDNode *N,
- TargetLowering::DAGCombinerInfo &DCI) {
- // The type legalizer turns a vector load of i8 values into a zextload to i16
- // registers, optionally ANY_EXTENDs it (if target type is integer),
- // and ANDs off the high 8 bits. Since we turn this load into a
- // target-specific DAG node, the DAG combiner fails to eliminate these AND
- // nodes. Do that here.
- SDValue Val = N->getOperand(0);
- SDValue Mask = N->getOperand(1);
-
- if (isa<ConstantSDNode>(Val)) {
- std::swap(Val, Mask);
- }
-
- SDValue AExt;
- // Generally, we will see zextload -> IMOV16rr -> ANY_EXTEND -> and
- if (Val.getOpcode() == ISD::ANY_EXTEND) {
- AExt = Val;
- Val = Val->getOperand(0);
- }
-
- if (Val->isMachineOpcode() && Val->getMachineOpcode() == NVPTX::IMOV16rr) {
- Val = Val->getOperand(0);
- }
-
- if (Val->getOpcode() == NVPTXISD::LoadV2 ||
- Val->getOpcode() == NVPTXISD::LoadV4) {
- ConstantSDNode *MaskCnst = dyn_cast<ConstantSDNode>(Mask);
- if (!MaskCnst) {
- // Not an AND with a constant
- return SDValue();
- }
-
- uint64_t MaskVal = MaskCnst->getZExtValue();
- if (MaskVal != 0xff) {
- // Not an AND that chops off top 8 bits
- return SDValue();
- }
-
- MemSDNode *Mem = dyn_cast<MemSDNode>(Val);
- if (!Mem) {
- // Not a MemSDNode?!?
- return SDValue();
- }
-
- EVT MemVT = Mem->getMemoryVT();
- if (MemVT != MVT::v2i8 && MemVT != MVT::v4i8) {
- // We only handle the i8 case
- return SDValue();
- }
-
- unsigned ExtType =
- cast<ConstantSDNode>(Val->getOperand(Val->getNumOperands()-1))->
- getZExtValue();
- if (ExtType == ISD::SEXTLOAD) {
- // If for some reason the load is a sextload, the and is needed to zero
- // out the high 8 bits
- return SDValue();
- }
-
- bool AddTo = false;
- if (AExt.getNode() != nullptr) {
- // Re-insert the ext as a zext.
- Val = DCI.DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N),
- AExt.getValueType(), Val);
- AddTo = true;
- }
-
- // If we get here, the AND is unnecessary. Just replace it with the load
- DCI.CombineTo(N, Val, AddTo);
- }
-
- return SDValue();
-}
-
-static SDValue PerformREMCombine(SDNode *N,
- TargetLowering::DAGCombinerInfo &DCI,
- CodeGenOpt::Level OptLevel) {
- assert(N->getOpcode() == ISD::SREM || N->getOpcode() == ISD::UREM);
-
- // Don't do anything at less than -O2.
- if (OptLevel < CodeGenOpt::Default)
- return SDValue();
-
- SelectionDAG &DAG = DCI.DAG;
- SDLoc DL(N);
- EVT VT = N->getValueType(0);
- bool IsSigned = N->getOpcode() == ISD::SREM;
- unsigned DivOpc = IsSigned ? ISD::SDIV : ISD::UDIV;
-
- const SDValue &Num = N->getOperand(0);
- const SDValue &Den = N->getOperand(1);
-
- for (const SDNode *U : Num->uses()) {
- if (U->getOpcode() == DivOpc && U->getOperand(0) == Num &&
- U->getOperand(1) == Den) {
- // Num % Den -> Num - (Num / Den) * Den
- return DAG.getNode(ISD::SUB, DL, VT, Num,
- DAG.getNode(ISD::MUL, DL, VT,
- DAG.getNode(DivOpc, DL, VT, Num, Den),
- Den));
- }
- }
- return SDValue();
-}
-
-enum OperandSignedness {
- Signed = 0,
- Unsigned,
- Unknown
-};
-
-/// IsMulWideOperandDemotable - Checks if the provided DAG node is an operand
-/// that can be demoted to \p OptSize bits without loss of information. The
-/// signedness of the operand, if determinable, is placed in \p S.
-static bool IsMulWideOperandDemotable(SDValue Op,
- unsigned OptSize,
- OperandSignedness &S) {
- S = Unknown;
-
- if (Op.getOpcode() == ISD::SIGN_EXTEND ||
- Op.getOpcode() == ISD::SIGN_EXTEND_INREG) {
- EVT OrigVT = Op.getOperand(0).getValueType();
- if (OrigVT.getSizeInBits() <= OptSize) {
- S = Signed;
- return true;
- }
- } else if (Op.getOpcode() == ISD::ZERO_EXTEND) {
- EVT OrigVT = Op.getOperand(0).getValueType();
- if (OrigVT.getSizeInBits() <= OptSize) {
- S = Unsigned;
- return true;
- }
- }
-
- return false;
-}
-
-/// AreMulWideOperandsDemotable - Checks if the given LHS and RHS operands can
-/// be demoted to \p OptSize bits without loss of information. If the operands
-/// contain a constant, it should appear as the RHS operand. The signedness of
-/// the operands is placed in \p IsSigned.
-static bool AreMulWideOperandsDemotable(SDValue LHS, SDValue RHS,
- unsigned OptSize,
- bool &IsSigned) {
- OperandSignedness LHSSign;
-
- // The LHS operand must be a demotable op
- if (!IsMulWideOperandDemotable(LHS, OptSize, LHSSign))
- return false;
-
- // We should have been able to determine the signedness from the LHS
- if (LHSSign == Unknown)
- return false;
-
- IsSigned = (LHSSign == Signed);
-
- // The RHS can be a demotable op or a constant
- if (ConstantSDNode *CI = dyn_cast<ConstantSDNode>(RHS)) {
- const APInt &Val = CI->getAPIntValue();
- if (LHSSign == Unsigned) {
- return Val.isIntN(OptSize);
- } else {
- return Val.isSignedIntN(OptSize);
- }
- } else {
- OperandSignedness RHSSign;
- if (!IsMulWideOperandDemotable(RHS, OptSize, RHSSign))
- return false;
-
- return LHSSign == RHSSign;
- }
-}
-
-/// TryMULWIDECombine - Attempt to replace a multiply of M bits with a multiply
-/// of M/2 bits that produces an M-bit result (i.e. mul.wide). This transform
-/// works on both multiply DAG nodes and SHL DAG nodes with a constant shift
-/// amount.
-static SDValue TryMULWIDECombine(SDNode *N,
- TargetLowering::DAGCombinerInfo &DCI) {
- EVT MulType = N->getValueType(0);
- if (MulType != MVT::i32 && MulType != MVT::i64) {
- return SDValue();
- }
-
- SDLoc DL(N);
- unsigned OptSize = MulType.getSizeInBits() >> 1;
- SDValue LHS = N->getOperand(0);
- SDValue RHS = N->getOperand(1);
-
- // Canonicalize the multiply so the constant (if any) is on the right
- if (N->getOpcode() == ISD::MUL) {
- if (isa<ConstantSDNode>(LHS)) {
- std::swap(LHS, RHS);
- }
- }
-
- // If we have a SHL, determine the actual multiply amount
- if (N->getOpcode() == ISD::SHL) {
- ConstantSDNode *ShlRHS = dyn_cast<ConstantSDNode>(RHS);
- if (!ShlRHS) {
- return SDValue();
- }
-
- APInt ShiftAmt = ShlRHS->getAPIntValue();
- unsigned BitWidth = MulType.getSizeInBits();
- if (ShiftAmt.sge(0) && ShiftAmt.slt(BitWidth)) {
- APInt MulVal = APInt(BitWidth, 1) << ShiftAmt;
- RHS = DCI.DAG.getConstant(MulVal, DL, MulType);
- } else {
- return SDValue();
- }
- }
-
- bool Signed;
- // Verify that our operands are demotable
- if (!AreMulWideOperandsDemotable(LHS, RHS, OptSize, Signed)) {
- return SDValue();
- }
-
- EVT DemotedVT;
- if (MulType == MVT::i32) {
- DemotedVT = MVT::i16;
- } else {
- DemotedVT = MVT::i32;
- }
-
- // Truncate the operands to the correct size. Note that these are just for
- // type consistency and will (likely) be eliminated in later phases.
- SDValue TruncLHS =
- DCI.DAG.getNode(ISD::TRUNCATE, DL, DemotedVT, LHS);
- SDValue TruncRHS =
- DCI.DAG.getNode(ISD::TRUNCATE, DL, DemotedVT, RHS);
-
- unsigned Opc;
- if (Signed) {
- Opc = NVPTXISD::MUL_WIDE_SIGNED;
- } else {
- Opc = NVPTXISD::MUL_WIDE_UNSIGNED;
- }
-
- return DCI.DAG.getNode(Opc, DL, MulType, TruncLHS, TruncRHS);
-}
-
-/// PerformMULCombine - Runs PTX-specific DAG combine patterns on MUL nodes.
-static SDValue PerformMULCombine(SDNode *N,
- TargetLowering::DAGCombinerInfo &DCI,
- CodeGenOpt::Level OptLevel) {
- if (OptLevel > 0) {
- // Try mul.wide combining at OptLevel > 0
- if (SDValue Ret = TryMULWIDECombine(N, DCI))
- return Ret;
- }
-
- return SDValue();
-}
-
-/// PerformSHLCombine - Runs PTX-specific DAG combine patterns on SHL nodes.
-static SDValue PerformSHLCombine(SDNode *N,
- TargetLowering::DAGCombinerInfo &DCI,
- CodeGenOpt::Level OptLevel) {
- if (OptLevel > 0) {
- // Try mul.wide combining at OptLevel > 0
- if (SDValue Ret = TryMULWIDECombine(N, DCI))
- return Ret;
- }
-
- return SDValue();
-}
-
-static SDValue PerformSETCCCombine(SDNode *N,
- TargetLowering::DAGCombinerInfo &DCI) {
- EVT CCType = N->getValueType(0);
- SDValue A = N->getOperand(0);
- SDValue B = N->getOperand(1);
-
- if (CCType != MVT::v2i1 || A.getValueType() != MVT::v2f16)
- return SDValue();
-
- SDLoc DL(N);
- // setp.f16x2 returns two scalar predicates, which we need to
- // convert back to v2i1. The returned result will be scalarized by
- // the legalizer, but the comparison will remain a single vector
- // instruction.
- SDValue CCNode = DCI.DAG.getNode(NVPTXISD::SETP_F16X2, DL,
- DCI.DAG.getVTList(MVT::i1, MVT::i1),
- {A, B, N->getOperand(2)});
- return DCI.DAG.getNode(ISD::BUILD_VECTOR, DL, CCType, CCNode.getValue(0),
- CCNode.getValue(1));
-}
-
-SDValue NVPTXTargetLowering::PerformDAGCombine(SDNode *N,
- DAGCombinerInfo &DCI) const {
- CodeGenOpt::Level OptLevel = getTargetMachine().getOptLevel();
- switch (N->getOpcode()) {
- default: break;
- case ISD::ADD:
- case ISD::FADD:
- return PerformADDCombine(N, DCI, STI, OptLevel);
- case ISD::MUL:
- return PerformMULCombine(N, DCI, OptLevel);
- case ISD::SHL:
- return PerformSHLCombine(N, DCI, OptLevel);
- case ISD::AND:
- return PerformANDCombine(N, DCI);
- case ISD::UREM:
- case ISD::SREM:
- return PerformREMCombine(N, DCI, OptLevel);
- case ISD::SETCC:
- return PerformSETCCCombine(N, DCI);
- }
- return SDValue();
-}
-
-/// ReplaceVectorLoad - Convert vector loads into multi-output scalar loads.
-static void ReplaceLoadVector(SDNode *N, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &Results) {
- EVT ResVT = N->getValueType(0);
- SDLoc DL(N);
-
- assert(ResVT.isVector() && "Vector load must have vector type");
-
- // We only handle "native" vector sizes for now, e.g. <4 x double> is not
- // legal. We can (and should) split that into 2 loads of <2 x double> here
- // but I'm leaving that as a TODO for now.
- assert(ResVT.isSimple() && "Can only handle simple types");
- switch (ResVT.getSimpleVT().SimpleTy) {
- default:
- return;
- case MVT::v2i8:
- case MVT::v2i16:
- case MVT::v2i32:
- case MVT::v2i64:
- case MVT::v2f16:
- case MVT::v2f32:
- case MVT::v2f64:
- case MVT::v4i8:
- case MVT::v4i16:
- case MVT::v4i32:
- case MVT::v4f16:
- case MVT::v4f32:
- case MVT::v8f16: // <4 x f16x2>
- // This is a "native" vector type
- break;
- }
-
- LoadSDNode *LD = cast<LoadSDNode>(N);
-
- unsigned Align = LD->getAlignment();
- auto &TD = DAG.getDataLayout();
- unsigned PrefAlign =
- TD.getPrefTypeAlignment(ResVT.getTypeForEVT(*DAG.getContext()));
- if (Align < PrefAlign) {
- // This load is not sufficiently aligned, so bail out and let this vector
- // load be scalarized. Note that we may still be able to emit smaller
- // vector loads. For example, if we are loading a <4 x float> with an
- // alignment of 8, this check will fail but the legalizer will try again
- // with 2 x <2 x float>, which will succeed with an alignment of 8.
- return;
- }
-
- EVT EltVT = ResVT.getVectorElementType();
- unsigned NumElts = ResVT.getVectorNumElements();
-
- // Since LoadV2 is a target node, we cannot rely on DAG type legalization.
- // Therefore, we must ensure the type is legal. For i1 and i8, we set the
- // loaded type to i16 and propagate the "real" type as the memory type.
- bool NeedTrunc = false;
- if (EltVT.getSizeInBits() < 16) {
- EltVT = MVT::i16;
- NeedTrunc = true;
- }
-
- unsigned Opcode = 0;
- SDVTList LdResVTs;
- bool LoadF16x2 = false;
-
- switch (NumElts) {
- default:
- return;
- case 2:
- Opcode = NVPTXISD::LoadV2;
- LdResVTs = DAG.getVTList(EltVT, EltVT, MVT::Other);
- break;
- case 4: {
- Opcode = NVPTXISD::LoadV4;
- EVT ListVTs[] = { EltVT, EltVT, EltVT, EltVT, MVT::Other };
- LdResVTs = DAG.getVTList(ListVTs);
- break;
- }
- case 8: {
- // v8f16 is a special case. PTX doesn't have ld.v8.f16
- // instruction. Instead, we split the vector into v2f16 chunks and
- // load them with ld.v4.b32.
- assert(EltVT == MVT::f16 && "Unsupported v8 vector type.");
- LoadF16x2 = true;
- Opcode = NVPTXISD::LoadV4;
- EVT ListVTs[] = {MVT::v2f16, MVT::v2f16, MVT::v2f16, MVT::v2f16,
- MVT::Other};
- LdResVTs = DAG.getVTList(ListVTs);
- break;
- }
- }
-
- // Copy regular operands
- SmallVector<SDValue, 8> OtherOps(N->op_begin(), N->op_end());
-
- // The select routine does not have access to the LoadSDNode instance, so
- // pass along the extension information
- OtherOps.push_back(DAG.getIntPtrConstant(LD->getExtensionType(), DL));
-
- SDValue NewLD = DAG.getMemIntrinsicNode(Opcode, DL, LdResVTs, OtherOps,
- LD->getMemoryVT(),
- LD->getMemOperand());
-
- SmallVector<SDValue, 8> ScalarRes;
- if (LoadF16x2) {
- // Split v2f16 subvectors back into individual elements.
- NumElts /= 2;
- for (unsigned i = 0; i < NumElts; ++i) {
- SDValue SubVector = NewLD.getValue(i);
- SDValue E0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, SubVector,
- DAG.getIntPtrConstant(0, DL));
- SDValue E1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, SubVector,
- DAG.getIntPtrConstant(1, DL));
- ScalarRes.push_back(E0);
- ScalarRes.push_back(E1);
- }
- } else {
- for (unsigned i = 0; i < NumElts; ++i) {
- SDValue Res = NewLD.getValue(i);
- if (NeedTrunc)
- Res = DAG.getNode(ISD::TRUNCATE, DL, ResVT.getVectorElementType(), Res);
- ScalarRes.push_back(Res);
- }
- }
-
- SDValue LoadChain = NewLD.getValue(NumElts);
-
- SDValue BuildVec = DAG.getBuildVector(ResVT, DL, ScalarRes);
-
- Results.push_back(BuildVec);
- Results.push_back(LoadChain);
-}
-
-static void ReplaceINTRINSIC_W_CHAIN(SDNode *N, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &Results) {
- SDValue Chain = N->getOperand(0);
- SDValue Intrin = N->getOperand(1);
- SDLoc DL(N);
-
- // Get the intrinsic ID
- unsigned IntrinNo = cast<ConstantSDNode>(Intrin.getNode())->getZExtValue();
- switch (IntrinNo) {
- default:
- return;
- case Intrinsic::nvvm_ldg_global_i:
- case Intrinsic::nvvm_ldg_global_f:
- case Intrinsic::nvvm_ldg_global_p:
- case Intrinsic::nvvm_ldu_global_i:
- case Intrinsic::nvvm_ldu_global_f:
- case Intrinsic::nvvm_ldu_global_p: {
- EVT ResVT = N->getValueType(0);
-
- if (ResVT.isVector()) {
- // Vector LDG/LDU
-
- unsigned NumElts = ResVT.getVectorNumElements();
- EVT EltVT = ResVT.getVectorElementType();
-
- // Since LDU/LDG are target nodes, we cannot rely on DAG type
- // legalization.
- // Therefore, we must ensure the type is legal. For i1 and i8, we set the
- // loaded type to i16 and propagate the "real" type as the memory type.
- bool NeedTrunc = false;
- if (EltVT.getSizeInBits() < 16) {
- EltVT = MVT::i16;
- NeedTrunc = true;
- }
-
- unsigned Opcode = 0;
- SDVTList LdResVTs;
-
- switch (NumElts) {
- default:
- return;
- case 2:
- switch (IntrinNo) {
- default:
- return;
- case Intrinsic::nvvm_ldg_global_i:
- case Intrinsic::nvvm_ldg_global_f:
- case Intrinsic::nvvm_ldg_global_p:
- Opcode = NVPTXISD::LDGV2;
- break;
- case Intrinsic::nvvm_ldu_global_i:
- case Intrinsic::nvvm_ldu_global_f:
- case Intrinsic::nvvm_ldu_global_p:
- Opcode = NVPTXISD::LDUV2;
- break;
- }
- LdResVTs = DAG.getVTList(EltVT, EltVT, MVT::Other);
- break;
- case 4: {
- switch (IntrinNo) {
- default:
- return;
- case Intrinsic::nvvm_ldg_global_i:
- case Intrinsic::nvvm_ldg_global_f:
- case Intrinsic::nvvm_ldg_global_p:
- Opcode = NVPTXISD::LDGV4;
- break;
- case Intrinsic::nvvm_ldu_global_i:
- case Intrinsic::nvvm_ldu_global_f:
- case Intrinsic::nvvm_ldu_global_p:
- Opcode = NVPTXISD::LDUV4;
- break;
- }
- EVT ListVTs[] = { EltVT, EltVT, EltVT, EltVT, MVT::Other };
- LdResVTs = DAG.getVTList(ListVTs);
- break;
- }
- }
-
- SmallVector<SDValue, 8> OtherOps;
-
- // Copy regular operands
-
- OtherOps.push_back(Chain); // Chain
- // Skip operand 1 (intrinsic ID)
- // Others
- OtherOps.append(N->op_begin() + 2, N->op_end());
-
- MemIntrinsicSDNode *MemSD = cast<MemIntrinsicSDNode>(N);
-
- SDValue NewLD = DAG.getMemIntrinsicNode(Opcode, DL, LdResVTs, OtherOps,
- MemSD->getMemoryVT(),
- MemSD->getMemOperand());
-
- SmallVector<SDValue, 4> ScalarRes;
-
- for (unsigned i = 0; i < NumElts; ++i) {
- SDValue Res = NewLD.getValue(i);
- if (NeedTrunc)
- Res =
- DAG.getNode(ISD::TRUNCATE, DL, ResVT.getVectorElementType(), Res);
- ScalarRes.push_back(Res);
- }
-
- SDValue LoadChain = NewLD.getValue(NumElts);
-
- SDValue BuildVec =
- DAG.getBuildVector(ResVT, DL, ScalarRes);
-
- Results.push_back(BuildVec);
- Results.push_back(LoadChain);
- } else {
- // i8 LDG/LDU
- assert(ResVT.isSimple() && ResVT.getSimpleVT().SimpleTy == MVT::i8 &&
- "Custom handling of non-i8 ldu/ldg?");
-
- // Just copy all operands as-is
- SmallVector<SDValue, 4> Ops(N->op_begin(), N->op_end());
-
- // Force output to i16
- SDVTList LdResVTs = DAG.getVTList(MVT::i16, MVT::Other);
-
- MemIntrinsicSDNode *MemSD = cast<MemIntrinsicSDNode>(N);
-
- // We make sure the memory type is i8, which will be used during isel
- // to select the proper instruction.
- SDValue NewLD =
- DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, LdResVTs, Ops,
- MVT::i8, MemSD->getMemOperand());
-
- Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i8,
- NewLD.getValue(0)));
- Results.push_back(NewLD.getValue(1));
- }
- }
- }
-}
-
-void NVPTXTargetLowering::ReplaceNodeResults(
- SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const {
- switch (N->getOpcode()) {
- default:
- report_fatal_error("Unhandled custom legalization");
- case ISD::LOAD:
- ReplaceLoadVector(N, DAG, Results);
- return;
- case ISD::INTRINSIC_W_CHAIN:
- ReplaceINTRINSIC_W_CHAIN(N, DAG, Results);
- return;
- }
-}
-
-// Pin NVPTXSection's and NVPTXTargetObjectFile's vtables to this file.
-void NVPTXSection::anchor() {}
-
-NVPTXTargetObjectFile::~NVPTXTargetObjectFile() {
- delete static_cast<NVPTXSection *>(TextSection);
- delete static_cast<NVPTXSection *>(DataSection);
- delete static_cast<NVPTXSection *>(BSSSection);
- delete static_cast<NVPTXSection *>(ReadOnlySection);
-
- delete static_cast<NVPTXSection *>(StaticCtorSection);
- delete static_cast<NVPTXSection *>(StaticDtorSection);
- delete static_cast<NVPTXSection *>(LSDASection);
- delete static_cast<NVPTXSection *>(EHFrameSection);
- delete static_cast<NVPTXSection *>(DwarfAbbrevSection);
- delete static_cast<NVPTXSection *>(DwarfInfoSection);
- delete static_cast<NVPTXSection *>(DwarfLineSection);
- delete static_cast<NVPTXSection *>(DwarfFrameSection);
- delete static_cast<NVPTXSection *>(DwarfPubTypesSection);
- delete static_cast<const NVPTXSection *>(DwarfDebugInlineSection);
- delete static_cast<NVPTXSection *>(DwarfStrSection);
- delete static_cast<NVPTXSection *>(DwarfLocSection);
- delete static_cast<NVPTXSection *>(DwarfARangesSection);
- delete static_cast<NVPTXSection *>(DwarfRangesSection);
- delete static_cast<NVPTXSection *>(DwarfMacinfoSection);
-}
-
-MCSection *NVPTXTargetObjectFile::SelectSectionForGlobal(
- const GlobalObject *GO, SectionKind Kind, const TargetMachine &TM) const {
- return getDataSection();
-}
+//===-- NVPTXISelLowering.cpp - NVPTX DAG Lowering Implementation ---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the interfaces that NVPTX uses to lower LLVM code into a
+// selection DAG.
+//
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/NVPTXBaseInfo.h"
+#include "NVPTX.h"
+#include "NVPTXISelLowering.h"
+#include "NVPTXSection.h"
+#include "NVPTXSubtarget.h"
+#include "NVPTXTargetMachine.h"
+#include "NVPTXTargetObjectFile.h"
+#include "NVPTXUtilities.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/CodeGen/Analysis.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineMemOperand.h"
+#include "llvm/CodeGen/MachineValueType.h"
+#include "llvm/CodeGen/SelectionDAG.h"
+#include "llvm/CodeGen/SelectionDAGNodes.h"
+#include "llvm/CodeGen/ValueTypes.h"
+#include "llvm/IR/Argument.h"
+#include "llvm/IR/Attributes.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/GlobalValue.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/CodeGen.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetCallingConv.h"
+#include "llvm/Target/TargetLowering.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetOptions.h"
+#include <algorithm>
+#include <cassert>
+#include <cstdint>
+#include <iterator>
+#include <sstream>
+#include <string>
+#include <utility>
+#include <vector>
+
+#undef DEBUG_TYPE
+#define DEBUG_TYPE "nvptx-lower"
+
+using namespace llvm;
+
+// NOTE(review): appears to number call sites for unique param-space names;
+// all uses are outside this chunk — confirm against LowerCall.
+static unsigned int uniqueCallSite = 0;
+
+static cl::opt<bool> sched4reg(
+ "nvptx-sched4reg",
+ cl::desc("NVPTX Specific: schedule for register pressure"), cl::init(false));
+
+static cl::opt<unsigned>
+FMAContractLevelOpt("nvptx-fma-level", cl::ZeroOrMore, cl::Hidden,
+ cl::desc("NVPTX Specific: FMA contraction (0: don't do it"
+ " 1: do it 2: do it aggressively"),
+ cl::init(2));
+
+static cl::opt<int> UsePrecDivF32(
+ "nvptx-prec-divf32", cl::ZeroOrMore, cl::Hidden,
+ cl::desc("NVPTX Specific: 0 use div.approx, 1 use div.full, 2 use"
+ " IEEE Compliant F32 div.rnd if available."),
+ cl::init(2));
+
+static cl::opt<bool> UsePrecSqrtF32(
+ "nvptx-prec-sqrtf32", cl::Hidden,
+ cl::desc("NVPTX Specific: 0 use sqrt.approx, 1 use sqrt.rn."),
+ cl::init(true));
+
+static cl::opt<bool> FtzEnabled(
+ "nvptx-f32ftz", cl::ZeroOrMore, cl::Hidden,
+ cl::desc("NVPTX Specific: Flush f32 subnormals to sign-preserving zero."),
+ cl::init(false));
+
+// Returns the f32 division precision level: 0 = div.approx, 1 = div.full,
+// 2 = IEEE-compliant div.rnd (per the -nvptx-prec-divf32 option's help text).
+// Honors -nvptx-prec-divf32 when given on the command line; otherwise
+// derives the level from the UnsafeFPMath target option.
+int NVPTXTargetLowering::getDivF32Level() const {
+ if (UsePrecDivF32.getNumOccurrences() > 0) {
+ // If nvptx-prec-div32=N is used on the command-line, always honor it
+ return UsePrecDivF32;
+ } else {
+ // Otherwise, use div.approx if fast math is enabled
+ if (getTargetMachine().Options.UnsafeFPMath)
+ return 0;
+ else
+ return 2;
+ }
+}
+
+// Returns true if f32 sqrt should use the precise sqrt.rn form rather than
+// sqrt.approx. Honors -nvptx-prec-sqrtf32 when given; otherwise precise
+// sqrt is used unless fast math (UnsafeFPMath) is enabled.
+bool NVPTXTargetLowering::usePrecSqrtF32() const {
+ if (UsePrecSqrtF32.getNumOccurrences() > 0) {
+ // If nvptx-prec-sqrtf32 is used on the command-line, always honor it
+ return UsePrecSqrtF32;
+ } else {
+ // Otherwise, use sqrt.approx if fast math is enabled
+ return !getTargetMachine().Options.UnsafeFPMath;
+ }
+}
+
+// Returns true if f32 subnormals should be flushed to sign-preserving zero
+// for \p MF. Resolution order: the -nvptx-f32ftz command-line flag wins if
+// present; otherwise the function's "nvptx-f32ftz" string attribute is
+// consulted; absent both, FTZ is off.
+bool NVPTXTargetLowering::useF32FTZ(const MachineFunction &MF) const {
+ // TODO: Get rid of this flag; there can be only one way to do this.
+ if (FtzEnabled.getNumOccurrences() > 0) {
+ // If nvptx-f32ftz is used on the command-line, always honor it
+ return FtzEnabled;
+ } else {
+ const Function *F = MF.getFunction();
+ // Otherwise, check for an nvptx-f32ftz attribute on the function
+ if (F->hasFnAttribute("nvptx-f32ftz"))
+ return F->getFnAttribute("nvptx-f32ftz").getValueAsString() == "true";
+ else
+ return false;
+ }
+}
+
+// Returns true if \p VT is one of the small integer/FP vector types listed
+// below; the constructor registers custom load/store handling for exactly
+// these types.
+static bool IsPTXVectorType(MVT VT) {
+ switch (VT.SimpleTy) {
+ default:
+ return false;
+ case MVT::v2i1:
+ case MVT::v4i1:
+ case MVT::v2i8:
+ case MVT::v4i8:
+ case MVT::v2i16:
+ case MVT::v4i16:
+ case MVT::v2i32:
+ case MVT::v4i32:
+ case MVT::v2i64:
+ case MVT::v2f16:
+ case MVT::v4f16:
+ case MVT::v8f16: // <4 x f16x2>
+ case MVT::v2f32:
+ case MVT::v4f32:
+ case MVT::v2f64:
+ return true;
+ }
+}
+
+/// ComputePTXValueVTs - For the given Type \p Ty, returns the set of primitive
+/// EVTs that compose it. Unlike ComputeValueVTs, this will break apart vectors
+/// into their primitive components.
+/// NOTE: This is a band-aid for code that expects ComputeValueVTs to return the
+/// same number of types as the Ins/Outs arrays in LowerFormalArguments,
+/// LowerCall, and LowerReturn.
+/// \p Offsets, if non-null, receives the byte offset of each resulting EVT
+/// (parallel to \p ValueVTs), starting from \p StartingOffset.
+static void ComputePTXValueVTs(const TargetLowering &TLI, const DataLayout &DL,
+ Type *Ty, SmallVectorImpl<EVT> &ValueVTs,
+ SmallVectorImpl<uint64_t> *Offsets = nullptr,
+ uint64_t StartingOffset = 0) {
+ SmallVector<EVT, 16> TempVTs;
+ SmallVector<uint64_t, 16> TempOffsets;
+
+ // First flatten Ty the standard way, then post-process the vectors.
+ ComputeValueVTs(TLI, DL, Ty, TempVTs, &TempOffsets, StartingOffset);
+ for (unsigned i = 0, e = TempVTs.size(); i != e; ++i) {
+ EVT VT = TempVTs[i];
+ uint64_t Off = TempOffsets[i];
+ // Split vectors into individual elements, except for v2f16, which
+ // we will pass as a single scalar.
+ if (VT.isVector()) {
+ unsigned NumElts = VT.getVectorNumElements();
+ EVT EltVT = VT.getVectorElementType();
+ // Vectors with an even number of f16 elements will be passed to
+ // us as an array of v2f16 elements. We must match this so we
+ // stay in sync with Ins/Outs.
+ if (EltVT == MVT::f16 && NumElts % 2 == 0) {
+ EltVT = MVT::v2f16;
+ NumElts /= 2;
+ }
+ for (unsigned j = 0; j != NumElts; ++j) {
+ ValueVTs.push_back(EltVT);
+ if (Offsets)
+ Offsets->push_back(Off + j * EltVT.getStoreSize());
+ }
+ } else {
+ ValueVTs.push_back(VT);
+ if (Offsets)
+ Offsets->push_back(Off);
+ }
+ }
+}
+
+// Check whether we can merge loads/stores of some of the pieces of a
+// flattened function parameter or return value into a single vector
+// load/store.
+//
+// The flattened parameter is represented as a list of EVTs and
+// offsets, and the whole structure is aligned to ParamAlignment. This
+// function determines whether we can load/store pieces of the
+// parameter starting at index Idx using a single vectorized op of
+// size AccessSize. If so, it returns the number of param pieces
+// covered by the vector op. Otherwise, it returns 1.
+// AccessSize and the Offsets/ParamAlignment values are in bytes.
+static unsigned CanMergeParamLoadStoresStartingAt(
+ unsigned Idx, uint32_t AccessSize, const SmallVectorImpl<EVT> &ValueVTs,
+ const SmallVectorImpl<uint64_t> &Offsets, unsigned ParamAlignment) {
+ assert(isPowerOf2_32(AccessSize) && "must be a power of 2!");
+
+ // Can't vectorize if param alignment is not sufficient.
+ if (AccessSize > ParamAlignment)
+ return 1;
+ // Can't vectorize if offset is not aligned.
+ if (Offsets[Idx] & (AccessSize - 1))
+ return 1;
+
+ EVT EltVT = ValueVTs[Idx];
+ unsigned EltSize = EltVT.getStoreSize();
+
+ // Element is too large to vectorize.
+ if (EltSize >= AccessSize)
+ return 1;
+
+ unsigned NumElts = AccessSize / EltSize;
+ // Can't vectorize if AccessSize is not a multiple of EltSize.
+ if (AccessSize != EltSize * NumElts)
+ return 1;
+
+ // We don't have enough elements to vectorize.
+ if (Idx + NumElts > ValueVTs.size())
+ return 1;
+
+ // PTX ISA can only deal with 2- and 4-element vector ops.
+ if (NumElts != 4 && NumElts != 2)
+ return 1;
+
+ for (unsigned j = Idx + 1; j < Idx + NumElts; ++j) {
+ // Types do not match.
+ if (ValueVTs[j] != EltVT)
+ return 1;
+
+ // Elements are not contiguous.
+ if (Offsets[j] - Offsets[j - 1] != EltSize)
+ return 1;
+ }
+ // OK. We can vectorize ValueVTs[Idx..Idx+NumElts)
+ return NumElts;
+}
+
+// Flags for tracking per-element vectorization state of loads/stores
+// of a flattened function parameter or return value.
+// Element type of the vector returned by VectorizePTXValueVTs below.
+enum ParamVectorizationFlags {
+ PVF_INNER = 0x0, // Middle elements of a vector.
+ PVF_FIRST = 0x1, // First element of the vector.
+ PVF_LAST = 0x2, // Last element of the vector.
+ // Scalar is effectively a 1-element vector.
+ PVF_SCALAR = PVF_FIRST | PVF_LAST
+};
+
+// Computes whether and how we can vectorize the loads/stores of a
+// flattened function parameter or return value.
+//
+// The flattened parameter is represented as the list of ValueVTs and
+// Offsets, and is aligned to ParamAlignment bytes. We return a vector
+// of the same size as ValueVTs indicating how each piece should be
+// loaded/stored (i.e. as a scalar, or as part of a vector
+// load/store).
+// Vectorization is greedy: for each element the largest feasible access
+// size (16, 8, 4, then 2 bytes) is tried first.
+static SmallVector<ParamVectorizationFlags, 16>
+VectorizePTXValueVTs(const SmallVectorImpl<EVT> &ValueVTs,
+ const SmallVectorImpl<uint64_t> &Offsets,
+ unsigned ParamAlignment) {
+ // Set vector size to match ValueVTs and mark all elements as
+ // scalars by default.
+ SmallVector<ParamVectorizationFlags, 16> VectorInfo;
+ VectorInfo.assign(ValueVTs.size(), PVF_SCALAR);
+
+ // Check what we can vectorize using 128/64/32-bit accesses.
+ for (int I = 0, E = ValueVTs.size(); I != E; ++I) {
+ // Skip elements we've already processed.
+ assert(VectorInfo[I] == PVF_SCALAR && "Unexpected vector info state.");
+ for (unsigned AccessSize : {16, 8, 4, 2}) {
+ unsigned NumElts = CanMergeParamLoadStoresStartingAt(
+ I, AccessSize, ValueVTs, Offsets, ParamAlignment);
+ // Mark vectorized elements.
+ switch (NumElts) {
+ default:
+ llvm_unreachable("Unexpected return value");
+ case 1:
+ // Can't vectorize using this size, try next smaller size.
+ continue;
+ case 2:
+ assert(I + 1 < E && "Not enough elements.");
+ VectorInfo[I] = PVF_FIRST;
+ VectorInfo[I + 1] = PVF_LAST;
+ I += 1;
+ break;
+ case 4:
+ assert(I + 3 < E && "Not enough elements.");
+ VectorInfo[I] = PVF_FIRST;
+ VectorInfo[I + 1] = PVF_INNER;
+ VectorInfo[I + 2] = PVF_INNER;
+ VectorInfo[I + 3] = PVF_LAST;
+ I += 3;
+ break;
+ }
+ // Break out of the inner loop because we've already succeeded
+ // using largest possible AccessSize.
+ break;
+ }
+ }
+ return VectorInfo;
+}
+
+// NVPTXTargetLowering Constructor.
+NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM,
+ const NVPTXSubtarget &STI)
+ : TargetLowering(TM), nvTM(&TM), STI(STI) {
+ // always lower memset, memcpy, and memmove intrinsics to load/store
+ // instructions, rather
+ // than generating calls to memset, memcpy or memmove.
+ MaxStoresPerMemset = (unsigned) 0xFFFFFFFF;
+ MaxStoresPerMemcpy = (unsigned) 0xFFFFFFFF;
+ MaxStoresPerMemmove = (unsigned) 0xFFFFFFFF;
+
+ setBooleanContents(ZeroOrNegativeOneBooleanContent);
+ setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
+
+ // Jump is Expensive. Don't create extra control flow for 'and', 'or'
+ // condition branches.
+ setJumpIsExpensive(true);
+
+ // Wide divides are _very_ slow. Try to reduce the width of the divide if
+ // possible.
+ addBypassSlowDiv(64, 32);
+
+ // By default, use the Source scheduling
+ if (sched4reg)
+ setSchedulingPreference(Sched::RegPressure);
+ else
+ setSchedulingPreference(Sched::Source);
+
+ // Helper: register Op on VT with Action when FP16 math is allowed by the
+ // subtarget, and with NoF16Action otherwise.
+ auto setFP16OperationAction = [&](unsigned Op, MVT VT, LegalizeAction Action,
+ LegalizeAction NoF16Action) {
+ setOperationAction(Op, VT, STI.allowFP16Math() ? Action : NoF16Action);
+ };
+
+ addRegisterClass(MVT::i1, &NVPTX::Int1RegsRegClass);
+ addRegisterClass(MVT::i16, &NVPTX::Int16RegsRegClass);
+ addRegisterClass(MVT::i32, &NVPTX::Int32RegsRegClass);
+ addRegisterClass(MVT::i64, &NVPTX::Int64RegsRegClass);
+ addRegisterClass(MVT::f32, &NVPTX::Float32RegsRegClass);
+ addRegisterClass(MVT::f64, &NVPTX::Float64RegsRegClass);
+ addRegisterClass(MVT::f16, &NVPTX::Float16RegsRegClass);
+ addRegisterClass(MVT::v2f16, &NVPTX::Float16x2RegsRegClass);
+
+ // Conversion to/from FP16/FP16x2 is always legal.
+ setOperationAction(ISD::SINT_TO_FP, MVT::f16, Legal);
+ setOperationAction(ISD::FP_TO_SINT, MVT::f16, Legal);
+ setOperationAction(ISD::BUILD_VECTOR, MVT::v2f16, Custom);
+ setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f16, Custom);
+
+ setFP16OperationAction(ISD::SETCC, MVT::f16, Legal, Promote);
+ setFP16OperationAction(ISD::SETCC, MVT::v2f16, Legal, Expand);
+
+ // Operations not directly supported by NVPTX.
+ setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);
+ setOperationAction(ISD::SELECT_CC, MVT::v2f16, Expand);
+ setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
+ setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
+ setOperationAction(ISD::SELECT_CC, MVT::i1, Expand);
+ setOperationAction(ISD::SELECT_CC, MVT::i8, Expand);
+ setOperationAction(ISD::SELECT_CC, MVT::i16, Expand);
+ setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
+ setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
+ setOperationAction(ISD::BR_CC, MVT::f16, Expand);
+ setOperationAction(ISD::BR_CC, MVT::v2f16, Expand);
+ setOperationAction(ISD::BR_CC, MVT::f32, Expand);
+ setOperationAction(ISD::BR_CC, MVT::f64, Expand);
+ setOperationAction(ISD::BR_CC, MVT::i1, Expand);
+ setOperationAction(ISD::BR_CC, MVT::i8, Expand);
+ setOperationAction(ISD::BR_CC, MVT::i16, Expand);
+ setOperationAction(ISD::BR_CC, MVT::i32, Expand);
+ setOperationAction(ISD::BR_CC, MVT::i64, Expand);
+ // Some SIGN_EXTEND_INREG can be done using cvt instruction.
+ // For others we will expand to a SHL/SRA pair.
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i64, Legal);
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Legal);
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Legal);
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
+
+ setOperationAction(ISD::SHL_PARTS, MVT::i32 , Custom);
+ setOperationAction(ISD::SRA_PARTS, MVT::i32 , Custom);
+ setOperationAction(ISD::SRL_PARTS, MVT::i32 , Custom);
+ setOperationAction(ISD::SHL_PARTS, MVT::i64 , Custom);
+ setOperationAction(ISD::SRA_PARTS, MVT::i64 , Custom);
+ setOperationAction(ISD::SRL_PARTS, MVT::i64 , Custom);
+
+ setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);
+ setOperationAction(ISD::BITREVERSE, MVT::i64, Legal);
+
+ // Rotates are legal only when the subtarget has the corresponding
+ // hardware rotate support.
+ if (STI.hasROT64()) {
+ setOperationAction(ISD::ROTL, MVT::i64, Legal);
+ setOperationAction(ISD::ROTR, MVT::i64, Legal);
+ } else {
+ setOperationAction(ISD::ROTL, MVT::i64, Expand);
+ setOperationAction(ISD::ROTR, MVT::i64, Expand);
+ }
+ if (STI.hasROT32()) {
+ setOperationAction(ISD::ROTL, MVT::i32, Legal);
+ setOperationAction(ISD::ROTR, MVT::i32, Legal);
+ } else {
+ setOperationAction(ISD::ROTL, MVT::i32, Expand);
+ setOperationAction(ISD::ROTR, MVT::i32, Expand);
+ }
+
+ setOperationAction(ISD::ROTL, MVT::i16, Expand);
+ setOperationAction(ISD::ROTR, MVT::i16, Expand);
+ setOperationAction(ISD::ROTL, MVT::i8, Expand);
+ setOperationAction(ISD::ROTR, MVT::i8, Expand);
+ setOperationAction(ISD::BSWAP, MVT::i16, Expand);
+ setOperationAction(ISD::BSWAP, MVT::i32, Expand);
+ setOperationAction(ISD::BSWAP, MVT::i64, Expand);
+
+ // Indirect branch is not supported.
+ // This also disables Jump Table creation.
+ setOperationAction(ISD::BR_JT, MVT::Other, Expand);
+ setOperationAction(ISD::BRIND, MVT::Other, Expand);
+
+ setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
+ setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
+
+ // We want to legalize constant related memmove and memcpy
+ // intrinsics.
+ setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
+
+ // Turn FP extload into load/fpextend
+ setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
+ setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
+ setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
+ setLoadExtAction(ISD::EXTLOAD, MVT::v2f32, MVT::v2f16, Expand);
+ setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f16, Expand);
+ setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f32, Expand);
+ setLoadExtAction(ISD::EXTLOAD, MVT::v4f32, MVT::v4f16, Expand);
+ setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f16, Expand);
+ setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f32, Expand);
+ // Turn FP truncstore into trunc + store.
+ // FIXME: vector types should also be expanded
+ setTruncStoreAction(MVT::f32, MVT::f16, Expand);
+ setTruncStoreAction(MVT::f64, MVT::f16, Expand);
+ setTruncStoreAction(MVT::f64, MVT::f32, Expand);
+
+ // PTX does not support load / store predicate registers
+ setOperationAction(ISD::LOAD, MVT::i1, Custom);
+ setOperationAction(ISD::STORE, MVT::i1, Custom);
+
+ for (MVT VT : MVT::integer_valuetypes()) {
+ setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
+ setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
+ setTruncStoreAction(VT, MVT::i1, Expand);
+ }
+
+ // This is legal in NVPTX
+ setOperationAction(ISD::ConstantFP, MVT::f64, Legal);
+ setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
+ setOperationAction(ISD::ConstantFP, MVT::f16, Legal);
+
+ // TRAP can be lowered to PTX trap
+ setOperationAction(ISD::TRAP, MVT::Other, Legal);
+
+ setOperationAction(ISD::ADDC, MVT::i64, Expand);
+ setOperationAction(ISD::ADDE, MVT::i64, Expand);
+
+ // Register custom handling for vector loads/stores
+ for (MVT VT : MVT::vector_valuetypes()) {
+ if (IsPTXVectorType(VT)) {
+ setOperationAction(ISD::LOAD, VT, Custom);
+ setOperationAction(ISD::STORE, VT, Custom);
+ setOperationAction(ISD::INTRINSIC_W_CHAIN, VT, Custom);
+ }
+ }
+
+ // Custom handling for i8 intrinsics
+ setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom);
+
+ for (const auto& Ty : {MVT::i16, MVT::i32, MVT::i64}) {
+ setOperationAction(ISD::ABS, Ty, Legal);
+ setOperationAction(ISD::SMIN, Ty, Legal);
+ setOperationAction(ISD::SMAX, Ty, Legal);
+ setOperationAction(ISD::UMIN, Ty, Legal);
+ setOperationAction(ISD::UMAX, Ty, Legal);
+
+ setOperationAction(ISD::CTPOP, Ty, Legal);
+ setOperationAction(ISD::CTLZ, Ty, Legal);
+ }
+
+ setOperationAction(ISD::CTTZ, MVT::i16, Expand);
+ setOperationAction(ISD::CTTZ, MVT::i32, Expand);
+ setOperationAction(ISD::CTTZ, MVT::i64, Expand);
+
+ // PTX does not directly support SELP of i1, so promote to i32 first
+ setOperationAction(ISD::SELECT, MVT::i1, Custom);
+
+ // PTX cannot multiply two i64s in a single instruction.
+ setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
+ setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
+
+ // We have some custom DAG combine patterns for these nodes
+ setTargetDAGCombine(ISD::ADD);
+ setTargetDAGCombine(ISD::AND);
+ setTargetDAGCombine(ISD::FADD);
+ setTargetDAGCombine(ISD::MUL);
+ setTargetDAGCombine(ISD::SHL);
+ setTargetDAGCombine(ISD::SREM);
+ setTargetDAGCombine(ISD::UREM);
+
+ // setcc for f16x2 needs special handling to prevent legalizer's
+ // attempt to scalarize it due to v2i1 not being legal.
+ if (STI.allowFP16Math())
+ setTargetDAGCombine(ISD::SETCC);
+
+ // Promote fp16 arithmetic if fp16 hardware isn't available or the
+ // user passed --nvptx-no-fp16-math. The flag is useful because,
+ // although sm_53+ GPUs have some sort of FP16 support in
+ // hardware, only sm_53 and sm_60 have full implementation. Others
+ // only have token amount of hardware and are likely to run faster
+ // by using fp32 units instead.
+ for (const auto &Op : {ISD::FADD, ISD::FMUL, ISD::FSUB, ISD::FMA}) {
+ setFP16OperationAction(Op, MVT::f16, Legal, Promote);
+ setFP16OperationAction(Op, MVT::v2f16, Legal, Expand);
+ }
+
+ // There's no neg.f16 instruction. Expand to (0-x).
+ setOperationAction(ISD::FNEG, MVT::f16, Expand);
+ setOperationAction(ISD::FNEG, MVT::v2f16, Expand);
+
+ // (would be) Library functions.
+
+ // These map to conversion instructions for scalar FP types.
+ for (const auto &Op : {ISD::FCEIL, ISD::FFLOOR, ISD::FNEARBYINT, ISD::FRINT,
+ ISD::FROUND, ISD::FTRUNC}) {
+ setOperationAction(Op, MVT::f16, Legal);
+ setOperationAction(Op, MVT::f32, Legal);
+ setOperationAction(Op, MVT::f64, Legal);
+ setOperationAction(Op, MVT::v2f16, Expand);
+ }
+
+ // 'Expand' implements FCOPYSIGN without calling an external library.
+ setOperationAction(ISD::FCOPYSIGN, MVT::f16, Expand);
+ setOperationAction(ISD::FCOPYSIGN, MVT::v2f16, Expand);
+ setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
+ setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
+
+ // These map to corresponding instructions for f32/f64. f16 must be
+ // promoted to f32. v2f16 is expanded to f16, which is then promoted
+ // to f32.
+ for (const auto &Op : {ISD::FDIV, ISD::FREM, ISD::FSQRT, ISD::FSIN, ISD::FCOS,
+ ISD::FABS, ISD::FMINNUM, ISD::FMAXNUM}) {
+ setOperationAction(Op, MVT::f16, Promote);
+ setOperationAction(Op, MVT::f32, Legal);
+ setOperationAction(Op, MVT::f64, Legal);
+ setOperationAction(Op, MVT::v2f16, Expand);
+ }
+ setOperationAction(ISD::FMINNUM, MVT::f16, Promote);
+ setOperationAction(ISD::FMAXNUM, MVT::f16, Promote);
+ setOperationAction(ISD::FMINNAN, MVT::f16, Promote);
+ setOperationAction(ISD::FMAXNAN, MVT::f16, Promote);
+
+ // No FEXP2, FLOG2. The PTX ex2 and log2 functions are always approximate.
+ // No FPOW or FREM in PTX.
+
+ // Now deduce the information based on the above mentioned
+ // actions
+ computeRegisterProperties(STI.getRegisterInfo());
+}
+
+const char *NVPTXTargetLowering::getTargetNodeName(unsigned Opcode) const {
+ switch ((NVPTXISD::NodeType)Opcode) {
+ case NVPTXISD::FIRST_NUMBER:
+ break;
+ case NVPTXISD::CALL:
+ return "NVPTXISD::CALL";
+ case NVPTXISD::RET_FLAG:
+ return "NVPTXISD::RET_FLAG";
+ case NVPTXISD::LOAD_PARAM:
+ return "NVPTXISD::LOAD_PARAM";
+ case NVPTXISD::Wrapper:
+ return "NVPTXISD::Wrapper";
+ case NVPTXISD::DeclareParam:
+ return "NVPTXISD::DeclareParam";
+ case NVPTXISD::DeclareScalarParam:
+ return "NVPTXISD::DeclareScalarParam";
+ case NVPTXISD::DeclareRet:
+ return "NVPTXISD::DeclareRet";
+ case NVPTXISD::DeclareScalarRet:
+ return "NVPTXISD::DeclareScalarRet";
+ case NVPTXISD::DeclareRetParam:
+ return "NVPTXISD::DeclareRetParam";
+ case NVPTXISD::PrintCall:
+ return "NVPTXISD::PrintCall";
+ case NVPTXISD::PrintConvergentCall:
+ return "NVPTXISD::PrintConvergentCall";
+ case NVPTXISD::PrintCallUni:
+ return "NVPTXISD::PrintCallUni";
+ case NVPTXISD::PrintConvergentCallUni:
+ return "NVPTXISD::PrintConvergentCallUni";
+ case NVPTXISD::LoadParam:
+ return "NVPTXISD::LoadParam";
+ case NVPTXISD::LoadParamV2:
+ return "NVPTXISD::LoadParamV2";
+ case NVPTXISD::LoadParamV4:
+ return "NVPTXISD::LoadParamV4";
+ case NVPTXISD::StoreParam:
+ return "NVPTXISD::StoreParam";
+ case NVPTXISD::StoreParamV2:
+ return "NVPTXISD::StoreParamV2";
+ case NVPTXISD::StoreParamV4:
+ return "NVPTXISD::StoreParamV4";
+ case NVPTXISD::StoreParamS32:
+ return "NVPTXISD::StoreParamS32";
+ case NVPTXISD::StoreParamU32:
+ return "NVPTXISD::StoreParamU32";
+ case NVPTXISD::CallArgBegin:
+ return "NVPTXISD::CallArgBegin";
+ case NVPTXISD::CallArg:
+ return "NVPTXISD::CallArg";
+ case NVPTXISD::LastCallArg:
+ return "NVPTXISD::LastCallArg";
+ case NVPTXISD::CallArgEnd:
+ return "NVPTXISD::CallArgEnd";
+ case NVPTXISD::CallVoid:
+ return "NVPTXISD::CallVoid";
+ case NVPTXISD::CallVal:
+ return "NVPTXISD::CallVal";
+ case NVPTXISD::CallSymbol:
+ return "NVPTXISD::CallSymbol";
+ case NVPTXISD::Prototype:
+ return "NVPTXISD::Prototype";
+ case NVPTXISD::MoveParam:
+ return "NVPTXISD::MoveParam";
+ case NVPTXISD::StoreRetval:
+ return "NVPTXISD::StoreRetval";
+ case NVPTXISD::StoreRetvalV2:
+ return "NVPTXISD::StoreRetvalV2";
+ case NVPTXISD::StoreRetvalV4:
+ return "NVPTXISD::StoreRetvalV4";
+ case NVPTXISD::PseudoUseParam:
+ return "NVPTXISD::PseudoUseParam";
+ case NVPTXISD::RETURN:
+ return "NVPTXISD::RETURN";
+ case NVPTXISD::CallSeqBegin:
+ return "NVPTXISD::CallSeqBegin";
+ case NVPTXISD::CallSeqEnd:
+ return "NVPTXISD::CallSeqEnd";
+ case NVPTXISD::CallPrototype:
+ return "NVPTXISD::CallPrototype";
+ case NVPTXISD::LoadV2:
+ return "NVPTXISD::LoadV2";
+ case NVPTXISD::LoadV4:
+ return "NVPTXISD::LoadV4";
+ case NVPTXISD::LDGV2:
+ return "NVPTXISD::LDGV2";
+ case NVPTXISD::LDGV4:
+ return "NVPTXISD::LDGV4";
+ case NVPTXISD::LDUV2:
+ return "NVPTXISD::LDUV2";
+ case NVPTXISD::LDUV4:
+ return "NVPTXISD::LDUV4";
+ case NVPTXISD::StoreV2:
+ return "NVPTXISD::StoreV2";
+ case NVPTXISD::StoreV4:
+ return "NVPTXISD::StoreV4";
+ case NVPTXISD::FUN_SHFL_CLAMP:
+ return "NVPTXISD::FUN_SHFL_CLAMP";
+ case NVPTXISD::FUN_SHFR_CLAMP:
+ return "NVPTXISD::FUN_SHFR_CLAMP";
+ case NVPTXISD::IMAD:
+ return "NVPTXISD::IMAD";
+ case NVPTXISD::SETP_F16X2:
+ return "NVPTXISD::SETP_F16X2";
+ case NVPTXISD::Dummy:
+ return "NVPTXISD::Dummy";
+ case NVPTXISD::MUL_WIDE_SIGNED:
+ return "NVPTXISD::MUL_WIDE_SIGNED";
+ case NVPTXISD::MUL_WIDE_UNSIGNED:
+ return "NVPTXISD::MUL_WIDE_UNSIGNED";
+ case NVPTXISD::Tex1DFloatS32: return "NVPTXISD::Tex1DFloatS32";
+ case NVPTXISD::Tex1DFloatFloat: return "NVPTXISD::Tex1DFloatFloat";
+ case NVPTXISD::Tex1DFloatFloatLevel:
+ return "NVPTXISD::Tex1DFloatFloatLevel";
+ case NVPTXISD::Tex1DFloatFloatGrad:
+ return "NVPTXISD::Tex1DFloatFloatGrad";
+ case NVPTXISD::Tex1DS32S32: return "NVPTXISD::Tex1DS32S32";
+ case NVPTXISD::Tex1DS32Float: return "NVPTXISD::Tex1DS32Float";
+ case NVPTXISD::Tex1DS32FloatLevel:
+ return "NVPTXISD::Tex1DS32FloatLevel";
+ case NVPTXISD::Tex1DS32FloatGrad:
+ return "NVPTXISD::Tex1DS32FloatGrad";
+ case NVPTXISD::Tex1DU32S32: return "NVPTXISD::Tex1DU32S32";
+ case NVPTXISD::Tex1DU32Float: return "NVPTXISD::Tex1DU32Float";
+ case NVPTXISD::Tex1DU32FloatLevel:
+ return "NVPTXISD::Tex1DU32FloatLevel";
+ case NVPTXISD::Tex1DU32FloatGrad:
+ return "NVPTXISD::Tex1DU32FloatGrad";
+ case NVPTXISD::Tex1DArrayFloatS32: return "NVPTXISD::Tex1DArrayFloatS32";
+ case NVPTXISD::Tex1DArrayFloatFloat: return "NVPTXISD::Tex1DArrayFloatFloat";
+ case NVPTXISD::Tex1DArrayFloatFloatLevel:
+ return "NVPTXISD::Tex1DArrayFloatFloatLevel";
+ case NVPTXISD::Tex1DArrayFloatFloatGrad:
+ return "NVPTXISD::Tex1DArrayFloatFloatGrad";
+ case NVPTXISD::Tex1DArrayS32S32: return "NVPTXISD::Tex1DArrayS32S32";
+ case NVPTXISD::Tex1DArrayS32Float: return "NVPTXISD::Tex1DArrayS32Float";
+ case NVPTXISD::Tex1DArrayS32FloatLevel:
+ return "NVPTXISD::Tex1DArrayS32FloatLevel";
+ case NVPTXISD::Tex1DArrayS32FloatGrad:
+ return "NVPTXISD::Tex1DArrayS32FloatGrad";
+ case NVPTXISD::Tex1DArrayU32S32: return "NVPTXISD::Tex1DArrayU32S32";
+ case NVPTXISD::Tex1DArrayU32Float: return "NVPTXISD::Tex1DArrayU32Float";
+ case NVPTXISD::Tex1DArrayU32FloatLevel:
+ return "NVPTXISD::Tex1DArrayU32FloatLevel";
+ case NVPTXISD::Tex1DArrayU32FloatGrad:
+ return "NVPTXISD::Tex1DArrayU32FloatGrad";
+ case NVPTXISD::Tex2DFloatS32: return "NVPTXISD::Tex2DFloatS32";
+ case NVPTXISD::Tex2DFloatFloat: return "NVPTXISD::Tex2DFloatFloat";
+ case NVPTXISD::Tex2DFloatFloatLevel:
+ return "NVPTXISD::Tex2DFloatFloatLevel";
+ case NVPTXISD::Tex2DFloatFloatGrad:
+ return "NVPTXISD::Tex2DFloatFloatGrad";
+ case NVPTXISD::Tex2DS32S32: return "NVPTXISD::Tex2DS32S32";
+ case NVPTXISD::Tex2DS32Float: return "NVPTXISD::Tex2DS32Float";
+ case NVPTXISD::Tex2DS32FloatLevel:
+ return "NVPTXISD::Tex2DS32FloatLevel";
+ case NVPTXISD::Tex2DS32FloatGrad:
+ return "NVPTXISD::Tex2DS32FloatGrad";
+ case NVPTXISD::Tex2DU32S32: return "NVPTXISD::Tex2DU32S32";
+ case NVPTXISD::Tex2DU32Float: return "NVPTXISD::Tex2DU32Float";
+ case NVPTXISD::Tex2DU32FloatLevel:
+ return "NVPTXISD::Tex2DU32FloatLevel";
+ case NVPTXISD::Tex2DU32FloatGrad:
+ return "NVPTXISD::Tex2DU32FloatGrad";
+ case NVPTXISD::Tex2DArrayFloatS32: return "NVPTXISD::Tex2DArrayFloatS32";
+ case NVPTXISD::Tex2DArrayFloatFloat: return "NVPTXISD::Tex2DArrayFloatFloat";
+ case NVPTXISD::Tex2DArrayFloatFloatLevel:
+ return "NVPTXISD::Tex2DArrayFloatFloatLevel";
+ case NVPTXISD::Tex2DArrayFloatFloatGrad:
+ return "NVPTXISD::Tex2DArrayFloatFloatGrad";
+ case NVPTXISD::Tex2DArrayS32S32: return "NVPTXISD::Tex2DArrayS32S32";
+ case NVPTXISD::Tex2DArrayS32Float: return "NVPTXISD::Tex2DArrayS32Float";
+ case NVPTXISD::Tex2DArrayS32FloatLevel:
+ return "NVPTXISD::Tex2DArrayS32FloatLevel";
+ case NVPTXISD::Tex2DArrayS32FloatGrad:
+ return "NVPTXISD::Tex2DArrayS32FloatGrad";
+ case NVPTXISD::Tex2DArrayU32S32: return "NVPTXISD::Tex2DArrayU32S32";
+ case NVPTXISD::Tex2DArrayU32Float: return "NVPTXISD::Tex2DArrayU32Float";
+ case NVPTXISD::Tex2DArrayU32FloatLevel:
+ return "NVPTXISD::Tex2DArrayU32FloatLevel";
+ case NVPTXISD::Tex2DArrayU32FloatGrad:
+ return "NVPTXISD::Tex2DArrayU32FloatGrad";
+ case NVPTXISD::Tex3DFloatS32: return "NVPTXISD::Tex3DFloatS32";
+ case NVPTXISD::Tex3DFloatFloat: return "NVPTXISD::Tex3DFloatFloat";
+ case NVPTXISD::Tex3DFloatFloatLevel:
+ return "NVPTXISD::Tex3DFloatFloatLevel";
+ case NVPTXISD::Tex3DFloatFloatGrad:
+ return "NVPTXISD::Tex3DFloatFloatGrad";
+ case NVPTXISD::Tex3DS32S32: return "NVPTXISD::Tex3DS32S32";
+ case NVPTXISD::Tex3DS32Float: return "NVPTXISD::Tex3DS32Float";
+ case NVPTXISD::Tex3DS32FloatLevel:
+ return "NVPTXISD::Tex3DS32FloatLevel";
+ case NVPTXISD::Tex3DS32FloatGrad:
+ return "NVPTXISD::Tex3DS32FloatGrad";
+ case NVPTXISD::Tex3DU32S32: return "NVPTXISD::Tex3DU32S32";
+ case NVPTXISD::Tex3DU32Float: return "NVPTXISD::Tex3DU32Float";
+ case NVPTXISD::Tex3DU32FloatLevel:
+ return "NVPTXISD::Tex3DU32FloatLevel";
+ case NVPTXISD::Tex3DU32FloatGrad:
+ return "NVPTXISD::Tex3DU32FloatGrad";
+ case NVPTXISD::TexCubeFloatFloat: return "NVPTXISD::TexCubeFloatFloat";
+ case NVPTXISD::TexCubeFloatFloatLevel:
+ return "NVPTXISD::TexCubeFloatFloatLevel";
+ case NVPTXISD::TexCubeS32Float: return "NVPTXISD::TexCubeS32Float";
+ case NVPTXISD::TexCubeS32FloatLevel:
+ return "NVPTXISD::TexCubeS32FloatLevel";
+ case NVPTXISD::TexCubeU32Float: return "NVPTXISD::TexCubeU32Float";
+ case NVPTXISD::TexCubeU32FloatLevel:
+ return "NVPTXISD::TexCubeU32FloatLevel";
+ case NVPTXISD::TexCubeArrayFloatFloat:
+ return "NVPTXISD::TexCubeArrayFloatFloat";
+ case NVPTXISD::TexCubeArrayFloatFloatLevel:
+ return "NVPTXISD::TexCubeArrayFloatFloatLevel";
+ case NVPTXISD::TexCubeArrayS32Float:
+ return "NVPTXISD::TexCubeArrayS32Float";
+ case NVPTXISD::TexCubeArrayS32FloatLevel:
+ return "NVPTXISD::TexCubeArrayS32FloatLevel";
+ case NVPTXISD::TexCubeArrayU32Float:
+ return "NVPTXISD::TexCubeArrayU32Float";
+ case NVPTXISD::TexCubeArrayU32FloatLevel:
+ return "NVPTXISD::TexCubeArrayU32FloatLevel";
+ case NVPTXISD::Tld4R2DFloatFloat:
+ return "NVPTXISD::Tld4R2DFloatFloat";
+ case NVPTXISD::Tld4G2DFloatFloat:
+ return "NVPTXISD::Tld4G2DFloatFloat";
+ case NVPTXISD::Tld4B2DFloatFloat:
+ return "NVPTXISD::Tld4B2DFloatFloat";
+ case NVPTXISD::Tld4A2DFloatFloat:
+ return "NVPTXISD::Tld4A2DFloatFloat";
+ case NVPTXISD::Tld4R2DS64Float:
+ return "NVPTXISD::Tld4R2DS64Float";
+ case NVPTXISD::Tld4G2DS64Float:
+ return "NVPTXISD::Tld4G2DS64Float";
+ case NVPTXISD::Tld4B2DS64Float:
+ return "NVPTXISD::Tld4B2DS64Float";
+ case NVPTXISD::Tld4A2DS64Float:
+ return "NVPTXISD::Tld4A2DS64Float";
+ case NVPTXISD::Tld4R2DU64Float:
+ return "NVPTXISD::Tld4R2DU64Float";
+ case NVPTXISD::Tld4G2DU64Float:
+ return "NVPTXISD::Tld4G2DU64Float";
+ case NVPTXISD::Tld4B2DU64Float:
+ return "NVPTXISD::Tld4B2DU64Float";
+ case NVPTXISD::Tld4A2DU64Float:
+ return "NVPTXISD::Tld4A2DU64Float";
+
+ case NVPTXISD::TexUnified1DFloatS32:
+ return "NVPTXISD::TexUnified1DFloatS32";
+ case NVPTXISD::TexUnified1DFloatFloat:
+ return "NVPTXISD::TexUnified1DFloatFloat";
+ case NVPTXISD::TexUnified1DFloatFloatLevel:
+ return "NVPTXISD::TexUnified1DFloatFloatLevel";
+ case NVPTXISD::TexUnified1DFloatFloatGrad:
+ return "NVPTXISD::TexUnified1DFloatFloatGrad";
+ case NVPTXISD::TexUnified1DS32S32:
+ return "NVPTXISD::TexUnified1DS32S32";
+ case NVPTXISD::TexUnified1DS32Float:
+ return "NVPTXISD::TexUnified1DS32Float";
+ case NVPTXISD::TexUnified1DS32FloatLevel:
+ return "NVPTXISD::TexUnified1DS32FloatLevel";
+ case NVPTXISD::TexUnified1DS32FloatGrad:
+ return "NVPTXISD::TexUnified1DS32FloatGrad";
+ case NVPTXISD::TexUnified1DU32S32:
+ return "NVPTXISD::TexUnified1DU32S32";
+ case NVPTXISD::TexUnified1DU32Float:
+ return "NVPTXISD::TexUnified1DU32Float";
+ case NVPTXISD::TexUnified1DU32FloatLevel:
+ return "NVPTXISD::TexUnified1DU32FloatLevel";
+ case NVPTXISD::TexUnified1DU32FloatGrad:
+ return "NVPTXISD::TexUnified1DU32FloatGrad";
+ case NVPTXISD::TexUnified1DArrayFloatS32:
+ return "NVPTXISD::TexUnified1DArrayFloatS32";
+ case NVPTXISD::TexUnified1DArrayFloatFloat:
+ return "NVPTXISD::TexUnified1DArrayFloatFloat";
+ case NVPTXISD::TexUnified1DArrayFloatFloatLevel:
+ return "NVPTXISD::TexUnified1DArrayFloatFloatLevel";
+ case NVPTXISD::TexUnified1DArrayFloatFloatGrad:
+ return "NVPTXISD::TexUnified1DArrayFloatFloatGrad";
+ case NVPTXISD::TexUnified1DArrayS32S32:
+ return "NVPTXISD::TexUnified1DArrayS32S32";
+ case NVPTXISD::TexUnified1DArrayS32Float:
+ return "NVPTXISD::TexUnified1DArrayS32Float";
+ case NVPTXISD::TexUnified1DArrayS32FloatLevel:
+ return "NVPTXISD::TexUnified1DArrayS32FloatLevel";
+ case NVPTXISD::TexUnified1DArrayS32FloatGrad:
+ return "NVPTXISD::TexUnified1DArrayS32FloatGrad";
+ case NVPTXISD::TexUnified1DArrayU32S32:
+ return "NVPTXISD::TexUnified1DArrayU32S32";
+ case NVPTXISD::TexUnified1DArrayU32Float:
+ return "NVPTXISD::TexUnified1DArrayU32Float";
+ case NVPTXISD::TexUnified1DArrayU32FloatLevel:
+ return "NVPTXISD::TexUnified1DArrayU32FloatLevel";
+ case NVPTXISD::TexUnified1DArrayU32FloatGrad:
+ return "NVPTXISD::TexUnified1DArrayU32FloatGrad";
+ case NVPTXISD::TexUnified2DFloatS32:
+ return "NVPTXISD::TexUnified2DFloatS32";
+ case NVPTXISD::TexUnified2DFloatFloat:
+ return "NVPTXISD::TexUnified2DFloatFloat";
+ case NVPTXISD::TexUnified2DFloatFloatLevel:
+ return "NVPTXISD::TexUnified2DFloatFloatLevel";
+ case NVPTXISD::TexUnified2DFloatFloatGrad:
+ return "NVPTXISD::TexUnified2DFloatFloatGrad";
+ case NVPTXISD::TexUnified2DS32S32:
+ return "NVPTXISD::TexUnified2DS32S32";
+ case NVPTXISD::TexUnified2DS32Float:
+ return "NVPTXISD::TexUnified2DS32Float";
+ case NVPTXISD::TexUnified2DS32FloatLevel:
+ return "NVPTXISD::TexUnified2DS32FloatLevel";
+ case NVPTXISD::TexUnified2DS32FloatGrad:
+ return "NVPTXISD::TexUnified2DS32FloatGrad";
+ case NVPTXISD::TexUnified2DU32S32:
+ return "NVPTXISD::TexUnified2DU32S32";
+ case NVPTXISD::TexUnified2DU32Float:
+ return "NVPTXISD::TexUnified2DU32Float";
+ case NVPTXISD::TexUnified2DU32FloatLevel:
+ return "NVPTXISD::TexUnified2DU32FloatLevel";
+ case NVPTXISD::TexUnified2DU32FloatGrad:
+ return "NVPTXISD::TexUnified2DU32FloatGrad";
+ case NVPTXISD::TexUnified2DArrayFloatS32:
+ return "NVPTXISD::TexUnified2DArrayFloatS32";
+ case NVPTXISD::TexUnified2DArrayFloatFloat:
+ return "NVPTXISD::TexUnified2DArrayFloatFloat";
+ case NVPTXISD::TexUnified2DArrayFloatFloatLevel:
+ return "NVPTXISD::TexUnified2DArrayFloatFloatLevel";
+ case NVPTXISD::TexUnified2DArrayFloatFloatGrad:
+ return "NVPTXISD::TexUnified2DArrayFloatFloatGrad";
+ case NVPTXISD::TexUnified2DArrayS32S32:
+ return "NVPTXISD::TexUnified2DArrayS32S32";
+ case NVPTXISD::TexUnified2DArrayS32Float:
+ return "NVPTXISD::TexUnified2DArrayS32Float";
+ case NVPTXISD::TexUnified2DArrayS32FloatLevel:
+ return "NVPTXISD::TexUnified2DArrayS32FloatLevel";
+ case NVPTXISD::TexUnified2DArrayS32FloatGrad:
+ return "NVPTXISD::TexUnified2DArrayS32FloatGrad";
+ case NVPTXISD::TexUnified2DArrayU32S32:
+ return "NVPTXISD::TexUnified2DArrayU32S32";
+ case NVPTXISD::TexUnified2DArrayU32Float:
+ return "NVPTXISD::TexUnified2DArrayU32Float";
+ case NVPTXISD::TexUnified2DArrayU32FloatLevel:
+ return "NVPTXISD::TexUnified2DArrayU32FloatLevel";
+ case NVPTXISD::TexUnified2DArrayU32FloatGrad:
+ return "NVPTXISD::TexUnified2DArrayU32FloatGrad";
+ case NVPTXISD::TexUnified3DFloatS32:
+ return "NVPTXISD::TexUnified3DFloatS32";
+ case NVPTXISD::TexUnified3DFloatFloat:
+ return "NVPTXISD::TexUnified3DFloatFloat";
+ case NVPTXISD::TexUnified3DFloatFloatLevel:
+ return "NVPTXISD::TexUnified3DFloatFloatLevel";
+ case NVPTXISD::TexUnified3DFloatFloatGrad:
+ return "NVPTXISD::TexUnified3DFloatFloatGrad";
+ case NVPTXISD::TexUnified3DS32S32:
+ return "NVPTXISD::TexUnified3DS32S32";
+ case NVPTXISD::TexUnified3DS32Float:
+ return "NVPTXISD::TexUnified3DS32Float";
+ case NVPTXISD::TexUnified3DS32FloatLevel:
+ return "NVPTXISD::TexUnified3DS32FloatLevel";
+ case NVPTXISD::TexUnified3DS32FloatGrad:
+ return "NVPTXISD::TexUnified3DS32FloatGrad";
+ case NVPTXISD::TexUnified3DU32S32:
+ return "NVPTXISD::TexUnified3DU32S32";
+ case NVPTXISD::TexUnified3DU32Float:
+ return "NVPTXISD::TexUnified3DU32Float";
+ case NVPTXISD::TexUnified3DU32FloatLevel:
+ return "NVPTXISD::TexUnified3DU32FloatLevel";
+ case NVPTXISD::TexUnified3DU32FloatGrad:
+ return "NVPTXISD::TexUnified3DU32FloatGrad";
+ case NVPTXISD::TexUnifiedCubeFloatFloat:
+ return "NVPTXISD::TexUnifiedCubeFloatFloat";
+ case NVPTXISD::TexUnifiedCubeFloatFloatLevel:
+ return "NVPTXISD::TexUnifiedCubeFloatFloatLevel";
+ case NVPTXISD::TexUnifiedCubeS32Float:
+ return "NVPTXISD::TexUnifiedCubeS32Float";
+ case NVPTXISD::TexUnifiedCubeS32FloatLevel:
+ return "NVPTXISD::TexUnifiedCubeS32FloatLevel";
+ case NVPTXISD::TexUnifiedCubeU32Float:
+ return "NVPTXISD::TexUnifiedCubeU32Float";
+ case NVPTXISD::TexUnifiedCubeU32FloatLevel:
+ return "NVPTXISD::TexUnifiedCubeU32FloatLevel";
+ case NVPTXISD::TexUnifiedCubeArrayFloatFloat:
+ return "NVPTXISD::TexUnifiedCubeArrayFloatFloat";
+ case NVPTXISD::TexUnifiedCubeArrayFloatFloatLevel:
+ return "NVPTXISD::TexUnifiedCubeArrayFloatFloatLevel";
+ case NVPTXISD::TexUnifiedCubeArrayS32Float:
+ return "NVPTXISD::TexUnifiedCubeArrayS32Float";
+ case NVPTXISD::TexUnifiedCubeArrayS32FloatLevel:
+ return "NVPTXISD::TexUnifiedCubeArrayS32FloatLevel";
+ case NVPTXISD::TexUnifiedCubeArrayU32Float:
+ return "NVPTXISD::TexUnifiedCubeArrayU32Float";
+ case NVPTXISD::TexUnifiedCubeArrayU32FloatLevel:
+ return "NVPTXISD::TexUnifiedCubeArrayU32FloatLevel";
+ case NVPTXISD::Tld4UnifiedR2DFloatFloat:
+ return "NVPTXISD::Tld4UnifiedR2DFloatFloat";
+ case NVPTXISD::Tld4UnifiedG2DFloatFloat:
+ return "NVPTXISD::Tld4UnifiedG2DFloatFloat";
+ case NVPTXISD::Tld4UnifiedB2DFloatFloat:
+ return "NVPTXISD::Tld4UnifiedB2DFloatFloat";
+ case NVPTXISD::Tld4UnifiedA2DFloatFloat:
+ return "NVPTXISD::Tld4UnifiedA2DFloatFloat";
+ case NVPTXISD::Tld4UnifiedR2DS64Float:
+ return "NVPTXISD::Tld4UnifiedR2DS64Float";
+ case NVPTXISD::Tld4UnifiedG2DS64Float:
+ return "NVPTXISD::Tld4UnifiedG2DS64Float";
+ case NVPTXISD::Tld4UnifiedB2DS64Float:
+ return "NVPTXISD::Tld4UnifiedB2DS64Float";
+ case NVPTXISD::Tld4UnifiedA2DS64Float:
+ return "NVPTXISD::Tld4UnifiedA2DS64Float";
+ case NVPTXISD::Tld4UnifiedR2DU64Float:
+ return "NVPTXISD::Tld4UnifiedR2DU64Float";
+ case NVPTXISD::Tld4UnifiedG2DU64Float:
+ return "NVPTXISD::Tld4UnifiedG2DU64Float";
+ case NVPTXISD::Tld4UnifiedB2DU64Float:
+ return "NVPTXISD::Tld4UnifiedB2DU64Float";
+ case NVPTXISD::Tld4UnifiedA2DU64Float:
+ return "NVPTXISD::Tld4UnifiedA2DU64Float";
+
+ case NVPTXISD::Suld1DI8Clamp: return "NVPTXISD::Suld1DI8Clamp";
+ case NVPTXISD::Suld1DI16Clamp: return "NVPTXISD::Suld1DI16Clamp";
+ case NVPTXISD::Suld1DI32Clamp: return "NVPTXISD::Suld1DI32Clamp";
+ case NVPTXISD::Suld1DI64Clamp: return "NVPTXISD::Suld1DI64Clamp";
+ case NVPTXISD::Suld1DV2I8Clamp: return "NVPTXISD::Suld1DV2I8Clamp";
+ case NVPTXISD::Suld1DV2I16Clamp: return "NVPTXISD::Suld1DV2I16Clamp";
+ case NVPTXISD::Suld1DV2I32Clamp: return "NVPTXISD::Suld1DV2I32Clamp";
+ case NVPTXISD::Suld1DV2I64Clamp: return "NVPTXISD::Suld1DV2I64Clamp";
+ case NVPTXISD::Suld1DV4I8Clamp: return "NVPTXISD::Suld1DV4I8Clamp";
+ case NVPTXISD::Suld1DV4I16Clamp: return "NVPTXISD::Suld1DV4I16Clamp";
+ case NVPTXISD::Suld1DV4I32Clamp: return "NVPTXISD::Suld1DV4I32Clamp";
+
+ case NVPTXISD::Suld1DArrayI8Clamp: return "NVPTXISD::Suld1DArrayI8Clamp";
+ case NVPTXISD::Suld1DArrayI16Clamp: return "NVPTXISD::Suld1DArrayI16Clamp";
+ case NVPTXISD::Suld1DArrayI32Clamp: return "NVPTXISD::Suld1DArrayI32Clamp";
+ case NVPTXISD::Suld1DArrayI64Clamp: return "NVPTXISD::Suld1DArrayI64Clamp";
+ case NVPTXISD::Suld1DArrayV2I8Clamp: return "NVPTXISD::Suld1DArrayV2I8Clamp";
+ case NVPTXISD::Suld1DArrayV2I16Clamp:return "NVPTXISD::Suld1DArrayV2I16Clamp";
+ case NVPTXISD::Suld1DArrayV2I32Clamp:return "NVPTXISD::Suld1DArrayV2I32Clamp";
+ case NVPTXISD::Suld1DArrayV2I64Clamp:return "NVPTXISD::Suld1DArrayV2I64Clamp";
+ case NVPTXISD::Suld1DArrayV4I8Clamp: return "NVPTXISD::Suld1DArrayV4I8Clamp";
+ case NVPTXISD::Suld1DArrayV4I16Clamp:return "NVPTXISD::Suld1DArrayV4I16Clamp";
+ case NVPTXISD::Suld1DArrayV4I32Clamp:return "NVPTXISD::Suld1DArrayV4I32Clamp";
+
+ case NVPTXISD::Suld2DI8Clamp: return "NVPTXISD::Suld2DI8Clamp";
+ case NVPTXISD::Suld2DI16Clamp: return "NVPTXISD::Suld2DI16Clamp";
+ case NVPTXISD::Suld2DI32Clamp: return "NVPTXISD::Suld2DI32Clamp";
+ case NVPTXISD::Suld2DI64Clamp: return "NVPTXISD::Suld2DI64Clamp";
+ case NVPTXISD::Suld2DV2I8Clamp: return "NVPTXISD::Suld2DV2I8Clamp";
+ case NVPTXISD::Suld2DV2I16Clamp: return "NVPTXISD::Suld2DV2I16Clamp";
+ case NVPTXISD::Suld2DV2I32Clamp: return "NVPTXISD::Suld2DV2I32Clamp";
+ case NVPTXISD::Suld2DV2I64Clamp: return "NVPTXISD::Suld2DV2I64Clamp";
+ case NVPTXISD::Suld2DV4I8Clamp: return "NVPTXISD::Suld2DV4I8Clamp";
+ case NVPTXISD::Suld2DV4I16Clamp: return "NVPTXISD::Suld2DV4I16Clamp";
+ case NVPTXISD::Suld2DV4I32Clamp: return "NVPTXISD::Suld2DV4I32Clamp";
+
+ case NVPTXISD::Suld2DArrayI8Clamp: return "NVPTXISD::Suld2DArrayI8Clamp";
+ case NVPTXISD::Suld2DArrayI16Clamp: return "NVPTXISD::Suld2DArrayI16Clamp";
+ case NVPTXISD::Suld2DArrayI32Clamp: return "NVPTXISD::Suld2DArrayI32Clamp";
+ case NVPTXISD::Suld2DArrayI64Clamp: return "NVPTXISD::Suld2DArrayI64Clamp";
+ case NVPTXISD::Suld2DArrayV2I8Clamp: return "NVPTXISD::Suld2DArrayV2I8Clamp";
+ case NVPTXISD::Suld2DArrayV2I16Clamp:return "NVPTXISD::Suld2DArrayV2I16Clamp";
+ case NVPTXISD::Suld2DArrayV2I32Clamp:return "NVPTXISD::Suld2DArrayV2I32Clamp";
+ case NVPTXISD::Suld2DArrayV2I64Clamp:return "NVPTXISD::Suld2DArrayV2I64Clamp";
+ case NVPTXISD::Suld2DArrayV4I8Clamp: return "NVPTXISD::Suld2DArrayV4I8Clamp";
+ case NVPTXISD::Suld2DArrayV4I16Clamp:return "NVPTXISD::Suld2DArrayV4I16Clamp";
+ case NVPTXISD::Suld2DArrayV4I32Clamp:return "NVPTXISD::Suld2DArrayV4I32Clamp";
+
+ case NVPTXISD::Suld3DI8Clamp: return "NVPTXISD::Suld3DI8Clamp";
+ case NVPTXISD::Suld3DI16Clamp: return "NVPTXISD::Suld3DI16Clamp";
+ case NVPTXISD::Suld3DI32Clamp: return "NVPTXISD::Suld3DI32Clamp";
+ case NVPTXISD::Suld3DI64Clamp: return "NVPTXISD::Suld3DI64Clamp";
+ case NVPTXISD::Suld3DV2I8Clamp: return "NVPTXISD::Suld3DV2I8Clamp";
+ case NVPTXISD::Suld3DV2I16Clamp: return "NVPTXISD::Suld3DV2I16Clamp";
+ case NVPTXISD::Suld3DV2I32Clamp: return "NVPTXISD::Suld3DV2I32Clamp";
+ case NVPTXISD::Suld3DV2I64Clamp: return "NVPTXISD::Suld3DV2I64Clamp";
+ case NVPTXISD::Suld3DV4I8Clamp: return "NVPTXISD::Suld3DV4I8Clamp";
+ case NVPTXISD::Suld3DV4I16Clamp: return "NVPTXISD::Suld3DV4I16Clamp";
+ case NVPTXISD::Suld3DV4I32Clamp: return "NVPTXISD::Suld3DV4I32Clamp";
+
+ case NVPTXISD::Suld1DI8Trap: return "NVPTXISD::Suld1DI8Trap";
+ case NVPTXISD::Suld1DI16Trap: return "NVPTXISD::Suld1DI16Trap";
+ case NVPTXISD::Suld1DI32Trap: return "NVPTXISD::Suld1DI32Trap";
+ case NVPTXISD::Suld1DI64Trap: return "NVPTXISD::Suld1DI64Trap";
+ case NVPTXISD::Suld1DV2I8Trap: return "NVPTXISD::Suld1DV2I8Trap";
+ case NVPTXISD::Suld1DV2I16Trap: return "NVPTXISD::Suld1DV2I16Trap";
+ case NVPTXISD::Suld1DV2I32Trap: return "NVPTXISD::Suld1DV2I32Trap";
+ case NVPTXISD::Suld1DV2I64Trap: return "NVPTXISD::Suld1DV2I64Trap";
+ case NVPTXISD::Suld1DV4I8Trap: return "NVPTXISD::Suld1DV4I8Trap";
+ case NVPTXISD::Suld1DV4I16Trap: return "NVPTXISD::Suld1DV4I16Trap";
+ case NVPTXISD::Suld1DV4I32Trap: return "NVPTXISD::Suld1DV4I32Trap";
+
+ case NVPTXISD::Suld1DArrayI8Trap: return "NVPTXISD::Suld1DArrayI8Trap";
+ case NVPTXISD::Suld1DArrayI16Trap: return "NVPTXISD::Suld1DArrayI16Trap";
+ case NVPTXISD::Suld1DArrayI32Trap: return "NVPTXISD::Suld1DArrayI32Trap";
+ case NVPTXISD::Suld1DArrayI64Trap: return "NVPTXISD::Suld1DArrayI64Trap";
+ case NVPTXISD::Suld1DArrayV2I8Trap: return "NVPTXISD::Suld1DArrayV2I8Trap";
+ case NVPTXISD::Suld1DArrayV2I16Trap: return "NVPTXISD::Suld1DArrayV2I16Trap";
+ case NVPTXISD::Suld1DArrayV2I32Trap: return "NVPTXISD::Suld1DArrayV2I32Trap";
+ case NVPTXISD::Suld1DArrayV2I64Trap: return "NVPTXISD::Suld1DArrayV2I64Trap";
+ case NVPTXISD::Suld1DArrayV4I8Trap: return "NVPTXISD::Suld1DArrayV4I8Trap";
+ case NVPTXISD::Suld1DArrayV4I16Trap: return "NVPTXISD::Suld1DArrayV4I16Trap";
+ case NVPTXISD::Suld1DArrayV4I32Trap: return "NVPTXISD::Suld1DArrayV4I32Trap";
+
+ case NVPTXISD::Suld2DI8Trap: return "NVPTXISD::Suld2DI8Trap";
+ case NVPTXISD::Suld2DI16Trap: return "NVPTXISD::Suld2DI16Trap";
+ case NVPTXISD::Suld2DI32Trap: return "NVPTXISD::Suld2DI32Trap";
+ case NVPTXISD::Suld2DI64Trap: return "NVPTXISD::Suld2DI64Trap";
+ case NVPTXISD::Suld2DV2I8Trap: return "NVPTXISD::Suld2DV2I8Trap";
+ case NVPTXISD::Suld2DV2I16Trap: return "NVPTXISD::Suld2DV2I16Trap";
+ case NVPTXISD::Suld2DV2I32Trap: return "NVPTXISD::Suld2DV2I32Trap";
+ case NVPTXISD::Suld2DV2I64Trap: return "NVPTXISD::Suld2DV2I64Trap";
+ case NVPTXISD::Suld2DV4I8Trap: return "NVPTXISD::Suld2DV4I8Trap";
+ case NVPTXISD::Suld2DV4I16Trap: return "NVPTXISD::Suld2DV4I16Trap";
+ case NVPTXISD::Suld2DV4I32Trap: return "NVPTXISD::Suld2DV4I32Trap";
+
+ case NVPTXISD::Suld2DArrayI8Trap: return "NVPTXISD::Suld2DArrayI8Trap";
+ case NVPTXISD::Suld2DArrayI16Trap: return "NVPTXISD::Suld2DArrayI16Trap";
+ case NVPTXISD::Suld2DArrayI32Trap: return "NVPTXISD::Suld2DArrayI32Trap";
+ case NVPTXISD::Suld2DArrayI64Trap: return "NVPTXISD::Suld2DArrayI64Trap";
+ case NVPTXISD::Suld2DArrayV2I8Trap: return "NVPTXISD::Suld2DArrayV2I8Trap";
+ case NVPTXISD::Suld2DArrayV2I16Trap: return "NVPTXISD::Suld2DArrayV2I16Trap";
+ case NVPTXISD::Suld2DArrayV2I32Trap: return "NVPTXISD::Suld2DArrayV2I32Trap";
+ case NVPTXISD::Suld2DArrayV2I64Trap: return "NVPTXISD::Suld2DArrayV2I64Trap";
+ case NVPTXISD::Suld2DArrayV4I8Trap: return "NVPTXISD::Suld2DArrayV4I8Trap";
+ case NVPTXISD::Suld2DArrayV4I16Trap: return "NVPTXISD::Suld2DArrayV4I16Trap";
+ case NVPTXISD::Suld2DArrayV4I32Trap: return "NVPTXISD::Suld2DArrayV4I32Trap";
+
+ case NVPTXISD::Suld3DI8Trap: return "NVPTXISD::Suld3DI8Trap";
+ case NVPTXISD::Suld3DI16Trap: return "NVPTXISD::Suld3DI16Trap";
+ case NVPTXISD::Suld3DI32Trap: return "NVPTXISD::Suld3DI32Trap";
+ case NVPTXISD::Suld3DI64Trap: return "NVPTXISD::Suld3DI64Trap";
+ case NVPTXISD::Suld3DV2I8Trap: return "NVPTXISD::Suld3DV2I8Trap";
+ case NVPTXISD::Suld3DV2I16Trap: return "NVPTXISD::Suld3DV2I16Trap";
+ case NVPTXISD::Suld3DV2I32Trap: return "NVPTXISD::Suld3DV2I32Trap";
+ case NVPTXISD::Suld3DV2I64Trap: return "NVPTXISD::Suld3DV2I64Trap";
+ case NVPTXISD::Suld3DV4I8Trap: return "NVPTXISD::Suld3DV4I8Trap";
+ case NVPTXISD::Suld3DV4I16Trap: return "NVPTXISD::Suld3DV4I16Trap";
+ case NVPTXISD::Suld3DV4I32Trap: return "NVPTXISD::Suld3DV4I32Trap";
+
+ case NVPTXISD::Suld1DI8Zero: return "NVPTXISD::Suld1DI8Zero";
+ case NVPTXISD::Suld1DI16Zero: return "NVPTXISD::Suld1DI16Zero";
+ case NVPTXISD::Suld1DI32Zero: return "NVPTXISD::Suld1DI32Zero";
+ case NVPTXISD::Suld1DI64Zero: return "NVPTXISD::Suld1DI64Zero";
+ case NVPTXISD::Suld1DV2I8Zero: return "NVPTXISD::Suld1DV2I8Zero";
+ case NVPTXISD::Suld1DV2I16Zero: return "NVPTXISD::Suld1DV2I16Zero";
+ case NVPTXISD::Suld1DV2I32Zero: return "NVPTXISD::Suld1DV2I32Zero";
+ case NVPTXISD::Suld1DV2I64Zero: return "NVPTXISD::Suld1DV2I64Zero";
+ case NVPTXISD::Suld1DV4I8Zero: return "NVPTXISD::Suld1DV4I8Zero";
+ case NVPTXISD::Suld1DV4I16Zero: return "NVPTXISD::Suld1DV4I16Zero";
+ case NVPTXISD::Suld1DV4I32Zero: return "NVPTXISD::Suld1DV4I32Zero";
+
+ case NVPTXISD::Suld1DArrayI8Zero: return "NVPTXISD::Suld1DArrayI8Zero";
+ case NVPTXISD::Suld1DArrayI16Zero: return "NVPTXISD::Suld1DArrayI16Zero";
+ case NVPTXISD::Suld1DArrayI32Zero: return "NVPTXISD::Suld1DArrayI32Zero";
+ case NVPTXISD::Suld1DArrayI64Zero: return "NVPTXISD::Suld1DArrayI64Zero";
+ case NVPTXISD::Suld1DArrayV2I8Zero: return "NVPTXISD::Suld1DArrayV2I8Zero";
+ case NVPTXISD::Suld1DArrayV2I16Zero: return "NVPTXISD::Suld1DArrayV2I16Zero";
+ case NVPTXISD::Suld1DArrayV2I32Zero: return "NVPTXISD::Suld1DArrayV2I32Zero";
+ case NVPTXISD::Suld1DArrayV2I64Zero: return "NVPTXISD::Suld1DArrayV2I64Zero";
+ case NVPTXISD::Suld1DArrayV4I8Zero: return "NVPTXISD::Suld1DArrayV4I8Zero";
+ case NVPTXISD::Suld1DArrayV4I16Zero: return "NVPTXISD::Suld1DArrayV4I16Zero";
+ case NVPTXISD::Suld1DArrayV4I32Zero: return "NVPTXISD::Suld1DArrayV4I32Zero";
+
+ case NVPTXISD::Suld2DI8Zero: return "NVPTXISD::Suld2DI8Zero";
+ case NVPTXISD::Suld2DI16Zero: return "NVPTXISD::Suld2DI16Zero";
+ case NVPTXISD::Suld2DI32Zero: return "NVPTXISD::Suld2DI32Zero";
+ case NVPTXISD::Suld2DI64Zero: return "NVPTXISD::Suld2DI64Zero";
+ case NVPTXISD::Suld2DV2I8Zero: return "NVPTXISD::Suld2DV2I8Zero";
+ case NVPTXISD::Suld2DV2I16Zero: return "NVPTXISD::Suld2DV2I16Zero";
+ case NVPTXISD::Suld2DV2I32Zero: return "NVPTXISD::Suld2DV2I32Zero";
+ case NVPTXISD::Suld2DV2I64Zero: return "NVPTXISD::Suld2DV2I64Zero";
+ case NVPTXISD::Suld2DV4I8Zero: return "NVPTXISD::Suld2DV4I8Zero";
+ case NVPTXISD::Suld2DV4I16Zero: return "NVPTXISD::Suld2DV4I16Zero";
+ case NVPTXISD::Suld2DV4I32Zero: return "NVPTXISD::Suld2DV4I32Zero";
+
+ case NVPTXISD::Suld2DArrayI8Zero: return "NVPTXISD::Suld2DArrayI8Zero";
+ case NVPTXISD::Suld2DArrayI16Zero: return "NVPTXISD::Suld2DArrayI16Zero";
+ case NVPTXISD::Suld2DArrayI32Zero: return "NVPTXISD::Suld2DArrayI32Zero";
+ case NVPTXISD::Suld2DArrayI64Zero: return "NVPTXISD::Suld2DArrayI64Zero";
+ case NVPTXISD::Suld2DArrayV2I8Zero: return "NVPTXISD::Suld2DArrayV2I8Zero";
+ case NVPTXISD::Suld2DArrayV2I16Zero: return "NVPTXISD::Suld2DArrayV2I16Zero";
+ case NVPTXISD::Suld2DArrayV2I32Zero: return "NVPTXISD::Suld2DArrayV2I32Zero";
+ case NVPTXISD::Suld2DArrayV2I64Zero: return "NVPTXISD::Suld2DArrayV2I64Zero";
+ case NVPTXISD::Suld2DArrayV4I8Zero: return "NVPTXISD::Suld2DArrayV4I8Zero";
+ case NVPTXISD::Suld2DArrayV4I16Zero: return "NVPTXISD::Suld2DArrayV4I16Zero";
+ case NVPTXISD::Suld2DArrayV4I32Zero: return "NVPTXISD::Suld2DArrayV4I32Zero";
+
+ case NVPTXISD::Suld3DI8Zero: return "NVPTXISD::Suld3DI8Zero";
+ case NVPTXISD::Suld3DI16Zero: return "NVPTXISD::Suld3DI16Zero";
+ case NVPTXISD::Suld3DI32Zero: return "NVPTXISD::Suld3DI32Zero";
+ case NVPTXISD::Suld3DI64Zero: return "NVPTXISD::Suld3DI64Zero";
+ case NVPTXISD::Suld3DV2I8Zero: return "NVPTXISD::Suld3DV2I8Zero";
+ case NVPTXISD::Suld3DV2I16Zero: return "NVPTXISD::Suld3DV2I16Zero";
+ case NVPTXISD::Suld3DV2I32Zero: return "NVPTXISD::Suld3DV2I32Zero";
+ case NVPTXISD::Suld3DV2I64Zero: return "NVPTXISD::Suld3DV2I64Zero";
+ case NVPTXISD::Suld3DV4I8Zero: return "NVPTXISD::Suld3DV4I8Zero";
+ case NVPTXISD::Suld3DV4I16Zero: return "NVPTXISD::Suld3DV4I16Zero";
+ case NVPTXISD::Suld3DV4I32Zero: return "NVPTXISD::Suld3DV4I32Zero";
+ }
+ return nullptr;
+}
+
+TargetLoweringBase::LegalizeTypeAction
+NVPTXTargetLowering::getPreferredVectorAction(EVT VT) const {
+  // Multi-element i1 vectors must be split: PTX has no vector predicate
+  // registers, so each i1 lane is legalized on its own.
+  const bool IsMultiElementI1 =
+      VT.getScalarType() == MVT::i1 && VT.getVectorNumElements() != 1;
+  if (IsMultiElementI1)
+    return TypeSplitVector;
+  // v2f16 is handled natively by the target, so keep it legal; everything
+  // else defers to the generic preference.
+  return VT == MVT::v2f16 ? TypeLegal
+                          : TargetLoweringBase::getPreferredVectorAction(VT);
+}
+
+// Emit an approximate square root (or reciprocal square root, when
+// Reciprocal is true) of Operand using NVVM approximation intrinsics.
+// Returns an empty SDValue when approximate sqrt is not permitted, letting
+// the generic lowering take over.  ExtraSteps follows the
+// TargetLowering reciprocal-estimate protocol and is forced to 0 refinement
+// steps when left unspecified; UseOneConst is not modified here.
+SDValue NVPTXTargetLowering::getSqrtEstimate(SDValue Operand, SelectionDAG &DAG,
+ int Enabled, int &ExtraSteps,
+ bool &UseOneConst,
+ bool Reciprocal) const {
+ // Only proceed if estimates were explicitly enabled, or left unspecified
+ // while precise f32 sqrt is not required for this function.
+ if (!(Enabled == ReciprocalEstimate::Enabled ||
+ (Enabled == ReciprocalEstimate::Unspecified && !usePrecSqrtF32())))
+ return SDValue();
+
+ if (ExtraSteps == ReciprocalEstimate::Unspecified)
+ ExtraSteps = 0;
+
+ SDLoc DL(Operand);
+ EVT VT = Operand.getValueType();
+ // Select the flush-denormals-to-zero (.ftz) intrinsic variants when the
+ // enclosing machine function requests FTZ behavior for f32.
+ bool Ftz = useF32FTZ(DAG.getMachineFunction());
+
+ // Helper: wrap an NVVM intrinsic call on Operand as INTRINSIC_WO_CHAIN.
+ auto MakeIntrinsicCall = [&](Intrinsic::ID IID) {
+ return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
+ DAG.getConstant(IID, DL, MVT::i32), Operand);
+ };
+
+ // The sqrt and rsqrt refinement processes assume we always start out with an
+ // approximation of the rsqrt. Therefore, if we're going to do any refinement
+ // (i.e. ExtraSteps > 0), we must return an rsqrt. But if we're *not* doing
+ // any refinement, we must return a regular sqrt.
+ if (Reciprocal || ExtraSteps > 0) {
+ if (VT == MVT::f32)
+ return MakeIntrinsicCall(Ftz ? Intrinsic::nvvm_rsqrt_approx_ftz_f
+ : Intrinsic::nvvm_rsqrt_approx_f);
+ else if (VT == MVT::f64)
+ return MakeIntrinsicCall(Intrinsic::nvvm_rsqrt_approx_d);
+ else
+ return SDValue();
+ } else {
+ if (VT == MVT::f32)
+ return MakeIntrinsicCall(Ftz ? Intrinsic::nvvm_sqrt_approx_ftz_f
+ : Intrinsic::nvvm_sqrt_approx_f);
+ else {
+ // There's no sqrt.approx.f64 instruction, so we emit
+ // reciprocal(rsqrt(x)). This is faster than
+ // select(x == 0, 0, x * rsqrt(x)). (In fact, it's faster than plain
+ // x * rsqrt(x).)
+ return DAG.getNode(
+ ISD::INTRINSIC_WO_CHAIN, DL, VT,
+ DAG.getConstant(Intrinsic::nvvm_rcp_approx_ftz_d, DL, MVT::i32),
+ MakeIntrinsicCall(Intrinsic::nvvm_rsqrt_approx_d));
+ }
+ }
+}
+
+SDValue
+NVPTXTargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const {
+  // Lower a GlobalAddress node by wrapping the target-specific global
+  // address in an NVPTXISD::Wrapper so it is materialized in PTX form.
+  SDLoc Loc(Op);
+  EVT PtrVT = getPointerTy(DAG.getDataLayout());
+  const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
+  SDValue TGA = DAG.getTargetGlobalAddress(GV, Loc, PtrVT);
+  return DAG.getNode(NVPTXISD::Wrapper, Loc, PtrVT, TGA);
+}
+
+// Build the ".callprototype" declaration string that PTX requires for
+// indirect call sites: it describes the return value and every parameter as
+// .param declarations so ptxas can type-check the call.
+//   DL           - data layout used for all size/alignment queries.
+//   retTy        - IR return type of the callee.
+//   Args / Outs  - argument list and the corresponding lowered output args
+//                  (Outs may have more entries than Args; see the OIdx
+//                  adjustment below).
+//   retAlignment - alignment to declare for aggregate/vector returns.
+//   CS           - the call site, used only for per-call alignment metadata.
+// Returns "" if the target is not ABI-compliant (sm < 20, unsupported).
+std::string NVPTXTargetLowering::getPrototype(
+ const DataLayout &DL, Type *retTy, const ArgListTy &Args,
+ const SmallVectorImpl<ISD::OutputArg> &Outs, unsigned retAlignment,
+ const ImmutableCallSite *CS) const {
+ auto PtrVT = getPointerTy(DL);
+
+ bool isABI = (STI.getSmVersion() >= 20);
+ assert(isABI && "Non-ABI compilation is not supported");
+ if (!isABI)
+ return "";
+
+ std::stringstream O;
+ O << "prototype_" << uniqueCallSite << " : .callprototype ";
+
+ if (retTy->getTypeID() == Type::VoidTyID) {
+ O << "()";
+ } else {
+ O << "(";
+ if (retTy->isFloatingPointTy() || retTy->isIntegerTy()) {
+ unsigned size = 0;
+ if (auto *ITy = dyn_cast<IntegerType>(retTy)) {
+ size = ITy->getBitWidth();
+ } else {
+ assert(retTy->isFloatingPointTy() &&
+ "Floating point type expected here");
+ size = retTy->getPrimitiveSizeInBits();
+ }
+ // PTX ABI requires all scalar return values to be at least 32
+ // bits in size. fp16 normally uses .b16 as its storage type in
+ // PTX, so its size must be adjusted here, too.
+ if (size < 32)
+ size = 32;
+
+ O << ".param .b" << size << " _";
+ } else if (isa<PointerType>(retTy)) {
+ O << ".param .b" << PtrVT.getSizeInBits() << " _";
+ } else if (retTy->isAggregateType() || retTy->isVectorTy()) {
+ // Use the DataLayout passed in by the caller.  The previous code
+ // re-fetched it via CS->getCalledFunction()->getParent(), but
+ // getCalledFunction() is null for indirect call sites -- which is
+ // precisely when this prototype is emitted -- so that dereference
+ // could crash.
+ O << ".param .align " << retAlignment << " .b8 _["
+ << DL.getTypeAllocSize(retTy) << "]";
+ } else {
+ llvm_unreachable("Unknown return type");
+ }
+ O << ") ";
+ }
+ O << "_ (";
+
+ bool first = true;
+
+ // Outs may contain several entries per IR argument (aggregates, wide
+ // vectors), so it is indexed with its own counter OIdx.
+ unsigned OIdx = 0;
+ for (unsigned i = 0, e = Args.size(); i != e; ++i, ++OIdx) {
+ Type *Ty = Args[i].Ty;
+ if (!first) {
+ O << ", ";
+ }
+ first = false;
+
+ if (!Outs[OIdx].Flags.isByVal()) {
+ if (Ty->isAggregateType() || Ty->isVectorTy()) {
+ unsigned align = 0;
+ const CallInst *CallI = cast<CallInst>(CS->getInstruction());
+ // +1 because index 0 is reserved for return type alignment
+ if (!getAlign(*CallI, i + 1, align))
+ align = DL.getABITypeAlignment(Ty);
+ unsigned sz = DL.getTypeAllocSize(Ty);
+ O << ".param .align " << align << " .b8 ";
+ O << "_";
+ O << "[" << sz << "]";
+ // update the index for Outs
+ SmallVector<EVT, 16> vtparts;
+ ComputeValueVTs(*this, DL, Ty, vtparts);
+ if (unsigned len = vtparts.size())
+ OIdx += len - 1;
+ continue;
+ }
+ // i8 types in IR will be i16 types in SDAG
+ assert((getValueType(DL, Ty) == Outs[OIdx].VT ||
+ (getValueType(DL, Ty) == MVT::i8 && Outs[OIdx].VT == MVT::i16)) &&
+ "type mismatch between callee prototype and arguments");
+ // scalar type
+ unsigned sz = 0;
+ if (isa<IntegerType>(Ty)) {
+ sz = cast<IntegerType>(Ty)->getBitWidth();
+ if (sz < 32)
+ sz = 32;
+ } else if (isa<PointerType>(Ty)) {
+ sz = PtrVT.getSizeInBits();
+ } else if (Ty->isHalfTy())
+ // PTX ABI requires all scalar parameters to be at least 32
+ // bits in size. fp16 normally uses .b16 as its storage type
+ // in PTX, so its size must be adjusted here, too.
+ sz = 32;
+ else
+ sz = Ty->getPrimitiveSizeInBits();
+ O << ".param .b" << sz << " ";
+ O << "_";
+ continue;
+ }
+ // Byval parameters are declared as byte arrays sized/aligned from the
+ // pointee type recorded in the output-arg flags.
+ auto *PTy = dyn_cast<PointerType>(Ty);
+ assert(PTy && "Param with byval attribute should be a pointer type");
+ Type *ETy = PTy->getElementType();
+
+ unsigned align = Outs[OIdx].Flags.getByValAlign();
+ unsigned sz = DL.getTypeAllocSize(ETy);
+ O << ".param .align " << align << " .b8 ";
+ O << "_";
+ O << "[" << sz << "]";
+ }
+ O << ");";
+ return O.str();
+}
+
+// Determine the alignment to use for argument Idx at call site CS, falling
+// back to the ABI alignment of Ty when no better information is available.
+unsigned NVPTXTargetLowering::getArgumentAlignment(SDValue Callee,
+                                                   const ImmutableCallSite *CS,
+                                                   Type *Ty, unsigned Idx,
+                                                   const DataLayout &DL) const {
+  // Without a call site the ABI type alignment is all we have.
+  if (!CS)
+    return DL.getABITypeAlignment(Ty);
+
+  unsigned Alignment = 0;
+  const Value *DirectCallee = CS->getCalledFunction();
+
+  if (!DirectCallee) {
+    // No direct function symbol; the target may be hidden behind constant
+    // cast expressions at the call site.
+    const Instruction *CalleeI = CS->getInstruction();
+    assert(CalleeI && "Call target is not a function or derived value?");
+
+    // With bitcast'd call targets, the instruction will be the call.
+    if (const auto *CI = dyn_cast<CallInst>(CalleeI)) {
+      // Alignment metadata attached to the call itself takes priority.
+      if (getAlign(*CI, Idx, Alignment))
+        return Alignment;
+
+      // Strip any chain of constant cast expressions from the call target.
+      const Value *CalleeV = CI->getCalledValue();
+      while (const auto *CE = dyn_cast<ConstantExpr>(CalleeV)) {
+        if (!CE->isCast())
+          break;
+        CalleeV = CE->getOperand(0);
+      }
+
+      // If the casts bottomed out at a Function, use its info below.
+      if (isa<Function>(CalleeV))
+        DirectCallee = CalleeV;
+    }
+  }
+
+  // Prefer explicit alignment info attached to the resolved callee.
+  if (DirectCallee && getAlign(*cast<Function>(DirectCallee), Idx, Alignment))
+    return Alignment;
+
+  // Call is indirect or alignment information is not available; fall back
+  // to the ABI type alignment.
+  return DL.getABITypeAlignment(Ty);
+}
+
+SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
+ SmallVectorImpl<SDValue> &InVals) const {
+ SelectionDAG &DAG = CLI.DAG;
+ SDLoc dl = CLI.DL;
+ SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
+ SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
+ SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
+ SDValue Chain = CLI.Chain;
+ SDValue Callee = CLI.Callee;
+ bool &isTailCall = CLI.IsTailCall;
+ ArgListTy &Args = CLI.getArgs();
+ Type *RetTy = CLI.RetTy;
+ ImmutableCallSite *CS = CLI.CS;
+ const DataLayout &DL = DAG.getDataLayout();
+
+ bool isABI = (STI.getSmVersion() >= 20);
+ assert(isABI && "Non-ABI compilation is not supported");
+ if (!isABI)
+ return Chain;
+
+ SDValue tempChain = Chain;
+ Chain = DAG.getCALLSEQ_START(
+ Chain, DAG.getIntPtrConstant(uniqueCallSite, dl, true), dl);
+ SDValue InFlag = Chain.getValue(1);
+
+ unsigned paramCount = 0;
+ // Args.size() and Outs.size() need not match.
+ // Outs.size() will be larger
+ // * if there is an aggregate argument with multiple fields (each field
+ // showing up separately in Outs)
+ // * if there is a vector argument with more than typical vector-length
+ // elements (generally if more than 4) where each vector element is
+ // individually present in Outs.
+ // So a different index should be used for indexing into Outs/OutVals.
+ // See similar issue in LowerFormalArguments.
+ unsigned OIdx = 0;
+ // Declare the .params or .reg need to pass values
+ // to the function
+ for (unsigned i = 0, e = Args.size(); i != e; ++i, ++OIdx) {
+ EVT VT = Outs[OIdx].VT;
+ Type *Ty = Args[i].Ty;
+
+ if (!Outs[OIdx].Flags.isByVal()) {
+ SmallVector<EVT, 16> VTs;
+ SmallVector<uint64_t, 16> Offsets;
+ ComputePTXValueVTs(*this, DL, Ty, VTs, &Offsets);
+ unsigned ArgAlign =
+ getArgumentAlignment(Callee, CS, Ty, paramCount + 1, DL);
+ unsigned AllocSize = DL.getTypeAllocSize(Ty);
+ SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
+ bool NeedAlign; // Does argument declaration specify alignment?
+ if (Ty->isAggregateType() || Ty->isVectorTy()) {
+ // declare .param .align <align> .b8 .param<n>[<size>];
+ SDValue DeclareParamOps[] = {
+ Chain, DAG.getConstant(ArgAlign, dl, MVT::i32),
+ DAG.getConstant(paramCount, dl, MVT::i32),
+ DAG.getConstant(AllocSize, dl, MVT::i32), InFlag};
+ Chain = DAG.getNode(NVPTXISD::DeclareParam, dl, DeclareParamVTs,
+ DeclareParamOps);
+ NeedAlign = true;
+ } else {
+ // declare .param .b<size> .param<n>;
+ if ((VT.isInteger() || VT.isFloatingPoint()) && AllocSize < 4) {
+ // PTX ABI requires integral types to be at least 32 bits in
+ // size. FP16 is loaded/stored using i16, so it's handled
+ // here as well.
+ AllocSize = 4;
+ }
+ SDValue DeclareScalarParamOps[] = {
+ Chain, DAG.getConstant(paramCount, dl, MVT::i32),
+ DAG.getConstant(AllocSize * 8, dl, MVT::i32),
+ DAG.getConstant(0, dl, MVT::i32), InFlag};
+ Chain = DAG.getNode(NVPTXISD::DeclareScalarParam, dl, DeclareParamVTs,
+ DeclareScalarParamOps);
+ NeedAlign = false;
+ }
+ InFlag = Chain.getValue(1);
+
+ // PTX Interoperability Guide 3.3(A): [Integer] Values shorter
+ // than 32-bits are sign extended or zero extended, depending on
+ // whether they are signed or unsigned types. This case applies
+ // only to scalar parameters and not to aggregate values.
+ bool ExtendIntegerParam =
+ Ty->isIntegerTy() && DL.getTypeAllocSizeInBits(Ty) < 32;
+
+ auto VectorInfo = VectorizePTXValueVTs(VTs, Offsets, ArgAlign);
+ SmallVector<SDValue, 6> StoreOperands;
+ for (unsigned j = 0, je = VTs.size(); j != je; ++j) {
+ // New store.
+ if (VectorInfo[j] & PVF_FIRST) {
+ assert(StoreOperands.empty() && "Unfinished preceeding store.");
+ StoreOperands.push_back(Chain);
+ StoreOperands.push_back(DAG.getConstant(paramCount, dl, MVT::i32));
+ StoreOperands.push_back(DAG.getConstant(Offsets[j], dl, MVT::i32));
+ }
+
+ EVT EltVT = VTs[j];
+ SDValue StVal = OutVals[OIdx];
+ if (ExtendIntegerParam) {
+ assert(VTs.size() == 1 && "Scalar can't have multiple parts.");
+ // zext/sext to i32
+ StVal = DAG.getNode(Outs[OIdx].Flags.isSExt() ? ISD::SIGN_EXTEND
+ : ISD::ZERO_EXTEND,
+ dl, MVT::i32, StVal);
+ } else if (EltVT.getSizeInBits() < 16) {
+ // Use 16-bit registers for small stores as it's the
+ // smallest general purpose register size supported by NVPTX.
+ StVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, StVal);
+ }
+
+ // Record the value to store.
+ StoreOperands.push_back(StVal);
+
+ if (VectorInfo[j] & PVF_LAST) {
+ unsigned NumElts = StoreOperands.size() - 3;
+ NVPTXISD::NodeType Op;
+ switch (NumElts) {
+ case 1:
+ Op = NVPTXISD::StoreParam;
+ break;
+ case 2:
+ Op = NVPTXISD::StoreParamV2;
+ break;
+ case 4:
+ Op = NVPTXISD::StoreParamV4;
+ break;
+ default:
+ llvm_unreachable("Invalid vector info.");
+ }
+
+ StoreOperands.push_back(InFlag);
+
+ // Adjust type of the store op if we've extended the scalar
+ // return value.
+ EVT TheStoreType = ExtendIntegerParam ? MVT::i32 : VTs[j];
+ unsigned EltAlign =
+ NeedAlign ? GreatestCommonDivisor64(ArgAlign, Offsets[j]) : 0;
+
+ Chain = DAG.getMemIntrinsicNode(
+ Op, dl, DAG.getVTList(MVT::Other, MVT::Glue), StoreOperands,
+ TheStoreType, MachinePointerInfo(), EltAlign);
+ InFlag = Chain.getValue(1);
+
+ // Cleanup.
+ StoreOperands.clear();
+ }
+ ++OIdx;
+ }
+ assert(StoreOperands.empty() && "Unfinished parameter store.");
+ if (VTs.size() > 0)
+ --OIdx;
+ ++paramCount;
+ continue;
+ }
+
+ // ByVal arguments
+ SmallVector<EVT, 16> VTs;
+ SmallVector<uint64_t, 16> Offsets;
+ auto *PTy = dyn_cast<PointerType>(Args[i].Ty);
+ assert(PTy && "Type of a byval parameter should be pointer");
+ ComputePTXValueVTs(*this, DL, PTy->getElementType(), VTs, &Offsets, 0);
+
+ // declare .param .align <align> .b8 .param<n>[<size>];
+ unsigned sz = Outs[OIdx].Flags.getByValSize();
+ SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
+ unsigned ArgAlign = Outs[OIdx].Flags.getByValAlign();
+ // The ByValAlign in the Outs[OIdx].Flags is alway set at this point,
+ // so we don't need to worry about natural alignment or not.
+ // See TargetLowering::LowerCallTo().
+
+ // Enforce minumum alignment of 4 to work around ptxas miscompile
+ // for sm_50+. See corresponding alignment adjustment in
+ // emitFunctionParamList() for details.
+ if (ArgAlign < 4)
+ ArgAlign = 4;
+ SDValue DeclareParamOps[] = {Chain, DAG.getConstant(ArgAlign, dl, MVT::i32),
+ DAG.getConstant(paramCount, dl, MVT::i32),
+ DAG.getConstant(sz, dl, MVT::i32), InFlag};
+ Chain = DAG.getNode(NVPTXISD::DeclareParam, dl, DeclareParamVTs,
+ DeclareParamOps);
+ InFlag = Chain.getValue(1);
+ for (unsigned j = 0, je = VTs.size(); j != je; ++j) {
+ EVT elemtype = VTs[j];
+ int curOffset = Offsets[j];
+ unsigned PartAlign = GreatestCommonDivisor64(ArgAlign, curOffset);
+ auto PtrVT = getPointerTy(DL);
+ SDValue srcAddr = DAG.getNode(ISD::ADD, dl, PtrVT, OutVals[OIdx],
+ DAG.getConstant(curOffset, dl, PtrVT));
+ SDValue theVal = DAG.getLoad(elemtype, dl, tempChain, srcAddr,
+ MachinePointerInfo(), PartAlign);
+ if (elemtype.getSizeInBits() < 16) {
+ theVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, theVal);
+ }
+ SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
+ SDValue CopyParamOps[] = { Chain,
+ DAG.getConstant(paramCount, dl, MVT::i32),
+ DAG.getConstant(curOffset, dl, MVT::i32),
+ theVal, InFlag };
+ Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreParam, dl, CopyParamVTs,
+ CopyParamOps, elemtype,
+ MachinePointerInfo());
+
+ InFlag = Chain.getValue(1);
+ }
+ ++paramCount;
+ }
+
+ GlobalAddressSDNode *Func = dyn_cast<GlobalAddressSDNode>(Callee.getNode());
+ unsigned retAlignment = 0;
+
+ // Handle Result
+ if (Ins.size() > 0) {
+ SmallVector<EVT, 16> resvtparts;
+ ComputeValueVTs(*this, DL, RetTy, resvtparts);
+
+ // Declare
+ // .param .align 16 .b8 retval0[<size-in-bytes>], or
+ // .param .b<size-in-bits> retval0
+ unsigned resultsz = DL.getTypeAllocSizeInBits(RetTy);
+ // Emit ".param .b<size-in-bits> retval0" instead of byte arrays only for
+ // these three types to match the logic in
+ // NVPTXAsmPrinter::printReturnValStr and NVPTXTargetLowering::getPrototype.
+ // Plus, this behavior is consistent with nvcc's.
+ if (RetTy->isFloatingPointTy() || RetTy->isIntegerTy() ||
+ RetTy->isPointerTy()) {
+ // Scalar needs to be at least 32bit wide
+ if (resultsz < 32)
+ resultsz = 32;
+ SDVTList DeclareRetVTs = DAG.getVTList(MVT::Other, MVT::Glue);
+ SDValue DeclareRetOps[] = { Chain, DAG.getConstant(1, dl, MVT::i32),
+ DAG.getConstant(resultsz, dl, MVT::i32),
+ DAG.getConstant(0, dl, MVT::i32), InFlag };
+ Chain = DAG.getNode(NVPTXISD::DeclareRet, dl, DeclareRetVTs,
+ DeclareRetOps);
+ InFlag = Chain.getValue(1);
+ } else {
+ retAlignment = getArgumentAlignment(Callee, CS, RetTy, 0, DL);
+ SDVTList DeclareRetVTs = DAG.getVTList(MVT::Other, MVT::Glue);
+ SDValue DeclareRetOps[] = { Chain,
+ DAG.getConstant(retAlignment, dl, MVT::i32),
+ DAG.getConstant(resultsz / 8, dl, MVT::i32),
+ DAG.getConstant(0, dl, MVT::i32), InFlag };
+ Chain = DAG.getNode(NVPTXISD::DeclareRetParam, dl, DeclareRetVTs,
+ DeclareRetOps);
+ InFlag = Chain.getValue(1);
+ }
+ }
+
+ if (!Func) {
+ // This is indirect function call case : PTX requires a prototype of the
+ // form
+ // proto_0 : .callprototype(.param .b32 _) _ (.param .b32 _);
+  // to be emitted, and the label has to be used as the last arg of call
+ // instruction.
+ // The prototype is embedded in a string and put as the operand for a
+ // CallPrototype SDNode which will print out to the value of the string.
+ SDVTList ProtoVTs = DAG.getVTList(MVT::Other, MVT::Glue);
+ std::string Proto = getPrototype(DL, RetTy, Args, Outs, retAlignment, CS);
+ const char *ProtoStr =
+ nvTM->getManagedStrPool()->getManagedString(Proto.c_str())->c_str();
+ SDValue ProtoOps[] = {
+ Chain, DAG.getTargetExternalSymbol(ProtoStr, MVT::i32), InFlag,
+ };
+ Chain = DAG.getNode(NVPTXISD::CallPrototype, dl, ProtoVTs, ProtoOps);
+ InFlag = Chain.getValue(1);
+ }
+ // Op to just print "call"
+ SDVTList PrintCallVTs = DAG.getVTList(MVT::Other, MVT::Glue);
+ SDValue PrintCallOps[] = {
+ Chain, DAG.getConstant((Ins.size() == 0) ? 0 : 1, dl, MVT::i32), InFlag
+ };
+ // We model convergent calls as separate opcodes.
+ unsigned Opcode = Func ? NVPTXISD::PrintCallUni : NVPTXISD::PrintCall;
+ if (CLI.IsConvergent)
+ Opcode = Opcode == NVPTXISD::PrintCallUni ? NVPTXISD::PrintConvergentCallUni
+ : NVPTXISD::PrintConvergentCall;
+ Chain = DAG.getNode(Opcode, dl, PrintCallVTs, PrintCallOps);
+ InFlag = Chain.getValue(1);
+
+ // Ops to print out the function name
+ SDVTList CallVoidVTs = DAG.getVTList(MVT::Other, MVT::Glue);
+ SDValue CallVoidOps[] = { Chain, Callee, InFlag };
+ Chain = DAG.getNode(NVPTXISD::CallVoid, dl, CallVoidVTs, CallVoidOps);
+ InFlag = Chain.getValue(1);
+
+ // Ops to print out the param list
+ SDVTList CallArgBeginVTs = DAG.getVTList(MVT::Other, MVT::Glue);
+ SDValue CallArgBeginOps[] = { Chain, InFlag };
+ Chain = DAG.getNode(NVPTXISD::CallArgBegin, dl, CallArgBeginVTs,
+ CallArgBeginOps);
+ InFlag = Chain.getValue(1);
+
+ for (unsigned i = 0, e = paramCount; i != e; ++i) {
+ unsigned opcode;
+ if (i == (e - 1))
+ opcode = NVPTXISD::LastCallArg;
+ else
+ opcode = NVPTXISD::CallArg;
+ SDVTList CallArgVTs = DAG.getVTList(MVT::Other, MVT::Glue);
+ SDValue CallArgOps[] = { Chain, DAG.getConstant(1, dl, MVT::i32),
+ DAG.getConstant(i, dl, MVT::i32), InFlag };
+ Chain = DAG.getNode(opcode, dl, CallArgVTs, CallArgOps);
+ InFlag = Chain.getValue(1);
+ }
+ SDVTList CallArgEndVTs = DAG.getVTList(MVT::Other, MVT::Glue);
+ SDValue CallArgEndOps[] = { Chain,
+ DAG.getConstant(Func ? 1 : 0, dl, MVT::i32),
+ InFlag };
+ Chain = DAG.getNode(NVPTXISD::CallArgEnd, dl, CallArgEndVTs, CallArgEndOps);
+ InFlag = Chain.getValue(1);
+
+ if (!Func) {
+ SDVTList PrototypeVTs = DAG.getVTList(MVT::Other, MVT::Glue);
+ SDValue PrototypeOps[] = { Chain,
+ DAG.getConstant(uniqueCallSite, dl, MVT::i32),
+ InFlag };
+ Chain = DAG.getNode(NVPTXISD::Prototype, dl, PrototypeVTs, PrototypeOps);
+ InFlag = Chain.getValue(1);
+ }
+
+ // Generate loads from param memory/moves from registers for result
+ if (Ins.size() > 0) {
+ SmallVector<EVT, 16> VTs;
+ SmallVector<uint64_t, 16> Offsets;
+ ComputePTXValueVTs(*this, DL, RetTy, VTs, &Offsets, 0);
+ assert(VTs.size() == Ins.size() && "Bad value decomposition");
+
+ unsigned RetAlign = getArgumentAlignment(Callee, CS, RetTy, 0, DL);
+ auto VectorInfo = VectorizePTXValueVTs(VTs, Offsets, RetAlign);
+
+ SmallVector<EVT, 6> LoadVTs;
+ int VecIdx = -1; // Index of the first element of the vector.
+
+ // PTX Interoperability Guide 3.3(A): [Integer] Values shorter than
+ // 32-bits are sign extended or zero extended, depending on whether
+ // they are signed or unsigned types.
+ bool ExtendIntegerRetVal =
+ RetTy->isIntegerTy() && DL.getTypeAllocSizeInBits(RetTy) < 32;
+
+ for (unsigned i = 0, e = VTs.size(); i != e; ++i) {
+ bool needTruncate = false;
+ EVT TheLoadType = VTs[i];
+ EVT EltType = Ins[i].VT;
+ unsigned EltAlign = GreatestCommonDivisor64(RetAlign, Offsets[i]);
+ if (ExtendIntegerRetVal) {
+ TheLoadType = MVT::i32;
+ EltType = MVT::i32;
+ needTruncate = true;
+ } else if (TheLoadType.getSizeInBits() < 16) {
+ if (VTs[i].isInteger())
+ needTruncate = true;
+ EltType = MVT::i16;
+ }
+
+ // Record index of the very first element of the vector.
+ if (VectorInfo[i] & PVF_FIRST) {
+ assert(VecIdx == -1 && LoadVTs.empty() && "Orphaned operand list.");
+ VecIdx = i;
+ }
+
+ LoadVTs.push_back(EltType);
+
+ if (VectorInfo[i] & PVF_LAST) {
+ unsigned NumElts = LoadVTs.size();
+ LoadVTs.push_back(MVT::Other);
+ LoadVTs.push_back(MVT::Glue);
+ NVPTXISD::NodeType Op;
+ switch (NumElts) {
+ case 1:
+ Op = NVPTXISD::LoadParam;
+ break;
+ case 2:
+ Op = NVPTXISD::LoadParamV2;
+ break;
+ case 4:
+ Op = NVPTXISD::LoadParamV4;
+ break;
+ default:
+ llvm_unreachable("Invalid vector info.");
+ }
+
+ SDValue LoadOperands[] = {
+ Chain, DAG.getConstant(1, dl, MVT::i32),
+ DAG.getConstant(Offsets[VecIdx], dl, MVT::i32), InFlag};
+ SDValue RetVal = DAG.getMemIntrinsicNode(
+ Op, dl, DAG.getVTList(LoadVTs), LoadOperands, TheLoadType,
+ MachinePointerInfo(), EltAlign);
+
+ for (unsigned j = 0; j < NumElts; ++j) {
+ SDValue Ret = RetVal.getValue(j);
+ if (needTruncate)
+ Ret = DAG.getNode(ISD::TRUNCATE, dl, Ins[VecIdx + j].VT, Ret);
+ InVals.push_back(Ret);
+ }
+ Chain = RetVal.getValue(NumElts);
+ InFlag = RetVal.getValue(NumElts + 1);
+
+ // Cleanup
+ VecIdx = -1;
+ LoadVTs.clear();
+ }
+ }
+ }
+
+ Chain = DAG.getCALLSEQ_END(Chain,
+ DAG.getIntPtrConstant(uniqueCallSite, dl, true),
+ DAG.getIntPtrConstant(uniqueCallSite + 1, dl,
+ true),
+ InFlag, dl);
+ uniqueCallSite++;
+
+ // set isTailCall to false for now, until we figure out how to express
+ // tail call optimization in PTX
+ isTailCall = false;
+ return Chain;
+}
+
+// By default CONCAT_VECTORS is lowered by ExpandVectorBuildThroughStack()
+// (see LegalizeDAG.cpp), which is slow and goes through local memory.
+// Instead, gather every scalar element of every concatenated operand with
+// EXTRACT_VECTOR_ELT and re-assemble them in one BUILD_VECTOR, just as
+// LegalizeOp() did in llvm 2.5.
+SDValue
+NVPTXTargetLowering::LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const {
+  SDNode *N = Op.getNode();
+  SDLoc DL(N);
+  SmallVector<SDValue, 8> Elements;
+
+  // Walk the concatenated sub-vectors in order and pull out their scalars.
+  for (unsigned OpIdx = 0, NumOps = N->getNumOperands(); OpIdx != NumOps;
+       ++OpIdx) {
+    SDValue Part = N->getOperand(OpIdx);
+    EVT PartVT = Part.getValueType();
+    EVT ScalarVT = PartVT.getVectorElementType();
+    for (unsigned EltIdx = 0, NumElts = PartVT.getVectorNumElements();
+         EltIdx != NumElts; ++EltIdx)
+      Elements.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ScalarVT,
+                                     Part, DAG.getIntPtrConstant(EltIdx, DL)));
+  }
+  return DAG.getBuildVector(N->getValueType(0), DL, Elements);
+}
+
+// We can init constant f16x2 with a single .b32 move. Normally it
+// would get lowered as two constant loads and vector-packing move.
+//        mov.b16         %h1, 0x4000;
+//        mov.b16         %h2, 0x3C00;
+//        mov.b32         %hh2, {%h2, %h1};
+// Instead we want just a constant move:
+//        mov.b32         %hh2, 0x40003C00
+//
+// This results in better SASS code with CUDA 7.x. Ptxas in CUDA 8.0
+// generates good SASS in both cases.
+SDValue NVPTXTargetLowering::LowerBUILD_VECTOR(SDValue Op,
+                                               SelectionDAG &DAG) const {
+  // Only handle BUILD_VECTORs that produce a v2f16 from two FP constants;
+  // anything else keeps the default lowering. (Removed a leftover
+  // commented-out early "return Op;" debug line.)
+  if (!(Op->getValueType(0) == MVT::v2f16 &&
+        isa<ConstantFPSDNode>(Op->getOperand(0)) &&
+        isa<ConstantFPSDNode>(Op->getOperand(1))))
+    return Op;
+
+  // Pack the two f16 bit patterns into one i32 — element 1 occupies the
+  // high half — and bitcast back to v2f16.
+  APInt E0 =
+      cast<ConstantFPSDNode>(Op->getOperand(0))->getValueAPF().bitcastToAPInt();
+  APInt E1 =
+      cast<ConstantFPSDNode>(Op->getOperand(1))->getValueAPF().bitcastToAPInt();
+  SDValue Const =
+      DAG.getConstant(E1.zext(32).shl(16) | E0.zext(32), SDLoc(Op), MVT::i32);
+  return DAG.getNode(ISD::BITCAST, SDLoc(Op), MVT::v2f16, Const);
+}
+
+SDValue NVPTXTargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
+                                                     SelectionDAG &DAG) const {
+  // A constant index is matched directly by tablegen patterns; nothing to do.
+  SDValue Idx = Op->getOperand(1);
+  if (isa<ConstantSDNode>(Idx.getNode()))
+    return Op;
+
+  // Dynamic index into a v2f16: extract both halves and pick the right one
+  // with a select on (Idx == 0).
+  SDValue Vec = Op->getOperand(0);
+  EVT VecVT = Vec.getValueType();
+  assert(VecVT == MVT::v2f16 && "Unexpected vector type.");
+  EVT ScalarVT = VecVT.getVectorElementType();
+
+  SDLoc DL(Op.getNode());
+  SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ScalarVT, Vec,
+                             DAG.getIntPtrConstant(0, DL));
+  SDValue Elt1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ScalarVT, Vec,
+                             DAG.getIntPtrConstant(1, DL));
+  return DAG.getSelectCC(DL, Idx, DAG.getIntPtrConstant(0, DL), Elt0, Elt1,
+                         ISD::CondCode::SETEQ);
+}
+
+/// LowerShiftRightParts - Lower SRL_PARTS, SRA_PARTS, which
+/// 1) returns two i32 values and take a 2 x i32 value to shift plus a shift
+/// amount, or
+/// 2) returns two i64 values and take a 2 x i64 value to shift plus a shift
+/// amount.
+SDValue NVPTXTargetLowering::LowerShiftRightParts(SDValue Op,
+                                                  SelectionDAG &DAG) const {
+  assert(Op.getNumOperands() == 3 && "Not a double-shift!");
+  assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS);
+
+  EVT VT = Op.getValueType();
+  unsigned VTBits = VT.getSizeInBits();
+  SDLoc dl(Op);
+  SDValue ShOpLo = Op.getOperand(0);
+  SDValue ShOpHi = Op.getOperand(1);
+  SDValue ShAmt = Op.getOperand(2);
+  // SRA_PARTS shifts copies of the sign bit into the high part; SRL_PARTS
+  // shifts in zeroes. Only the high-part shift opcode differs.
+  unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? ISD::SRA : ISD::SRL;
+
+  if (VTBits == 32 && STI.getSmVersion() >= 35) {
+    // For 32bit and sm35, we can use the funnel shift 'shf' instruction.
+    // {dHi, dLo} = {aHi, aLo} >> Amt
+    //   dHi = aHi >> Amt
+    //   dLo = shf.r.clamp aLo, aHi, Amt
+
+    SDValue Hi = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt);
+    SDValue Lo = DAG.getNode(NVPTXISD::FUN_SHFR_CLAMP, dl, VT, ShOpLo, ShOpHi,
+                             ShAmt);
+
+    SDValue Ops[2] = { Lo, Hi };
+    return DAG.getMergeValues(Ops, dl);
+  }
+  else {
+    // {dHi, dLo} = {aHi, aLo} >> Amt
+    // - if (Amt>=size) then
+    //    dLo = aHi >> (Amt-size)
+    //    dHi = aHi >> Amt (this is either all 0 or all 1)
+    //  else
+    //    dLo = (aLo >>logic Amt) | (aHi << (size-Amt))
+    //    dHi = aHi >> Amt
+
+    // Build both the in-range low result (FalseVal) and the Amt >= size low
+    // result (TrueVal), then select between them on the comparison below.
+    SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
+                                   DAG.getConstant(VTBits, dl, MVT::i32),
+                                   ShAmt);
+    SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, ShAmt);
+    SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
+                                     DAG.getConstant(VTBits, dl, MVT::i32));
+    SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, RevShAmt);
+    SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
+    SDValue TrueVal = DAG.getNode(Opc, dl, VT, ShOpHi, ExtraShAmt);
+
+    SDValue Cmp = DAG.getSetCC(dl, MVT::i1, ShAmt,
+                               DAG.getConstant(VTBits, dl, MVT::i32),
+                               ISD::SETGE);
+    SDValue Hi = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt);
+    SDValue Lo = DAG.getNode(ISD::SELECT, dl, VT, Cmp, TrueVal, FalseVal);
+
+    SDValue Ops[2] = { Lo, Hi };
+    return DAG.getMergeValues(Ops, dl);
+  }
+}
+
+/// LowerShiftLeftParts - Lower SHL_PARTS, which
+/// 1) returns two i32 values and take a 2 x i32 value to shift plus a shift
+/// amount, or
+/// 2) returns two i64 values and take a 2 x i64 value to shift plus a shift
+/// amount.
+SDValue NVPTXTargetLowering::LowerShiftLeftParts(SDValue Op,
+                                                 SelectionDAG &DAG) const {
+  assert(Op.getNumOperands() == 3 && "Not a double-shift!");
+  assert(Op.getOpcode() == ISD::SHL_PARTS);
+
+  EVT VT = Op.getValueType();
+  unsigned VTBits = VT.getSizeInBits();
+  SDLoc dl(Op);
+  SDValue ShOpLo = Op.getOperand(0);
+  SDValue ShOpHi = Op.getOperand(1);
+  SDValue ShAmt = Op.getOperand(2);
+
+  if (VTBits == 32 && STI.getSmVersion() >= 35) {
+    // For 32bit and sm35, we can use the funnel shift 'shf' instruction.
+    // {dHi, dLo} = {aHi, aLo} << Amt
+    //   dHi = shf.l.clamp aLo, aHi, Amt
+    //   dLo = aLo << Amt
+
+    SDValue Hi = DAG.getNode(NVPTXISD::FUN_SHFL_CLAMP, dl, VT, ShOpLo, ShOpHi,
+                             ShAmt);
+    SDValue Lo = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt);
+
+    SDValue Ops[2] = { Lo, Hi };
+    return DAG.getMergeValues(Ops, dl);
+  }
+  else {
+    // {dHi, dLo} = {aHi, aLo} << Amt
+    // - if (Amt>=size) then
+    //    dLo = aLo << Amt (all 0)
+    //    dHi = aLo << (Amt-size)
+    //  else
+    //    dLo = aLo << Amt
+    //    dHi = (aHi << Amt) | (aLo >> (size-Amt))
+
+    // Build both the in-range high result (FalseVal) and the Amt >= size
+    // high result (TrueVal), then select between them on the comparison.
+    SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
+                                   DAG.getConstant(VTBits, dl, MVT::i32),
+                                   ShAmt);
+    SDValue Tmp1 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, ShAmt);
+    SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
+                                     DAG.getConstant(VTBits, dl, MVT::i32));
+    SDValue Tmp2 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, RevShAmt);
+    SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
+    SDValue TrueVal = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ExtraShAmt);
+
+    SDValue Cmp = DAG.getSetCC(dl, MVT::i1, ShAmt,
+                               DAG.getConstant(VTBits, dl, MVT::i32),
+                               ISD::SETGE);
+    SDValue Lo = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt);
+    SDValue Hi = DAG.getNode(ISD::SELECT, dl, VT, Cmp, TrueVal, FalseVal);
+
+    SDValue Ops[2] = { Lo, Hi };
+    return DAG.getMergeValues(Ops, dl);
+  }
+}
+
+SDValue
+NVPTXTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
+  // Central dispatch for every opcode marked Custom in the constructor.
+  // Opcodes returned unchanged are accepted as-is; an empty SDValue tells
+  // the legalizer there is nothing to lower.
+  switch (Op.getOpcode()) {
+  case ISD::RETURNADDR:
+  case ISD::FRAMEADDR:
+    // Neither has a PTX representation; produce no value.
+    return SDValue();
+  case ISD::INTRINSIC_W_CHAIN:
+  case ISD::EXTRACT_SUBVECTOR:
+    // Accepted as-is.
+    return Op;
+  case ISD::GlobalAddress:
+    return LowerGlobalAddress(Op, DAG);
+  case ISD::BUILD_VECTOR:
+    return LowerBUILD_VECTOR(Op, DAG);
+  case ISD::EXTRACT_VECTOR_ELT:
+    return LowerEXTRACT_VECTOR_ELT(Op, DAG);
+  case ISD::CONCAT_VECTORS:
+    return LowerCONCAT_VECTORS(Op, DAG);
+  case ISD::STORE:
+    return LowerSTORE(Op, DAG);
+  case ISD::LOAD:
+    return LowerLOAD(Op, DAG);
+  case ISD::SHL_PARTS:
+    return LowerShiftLeftParts(Op, DAG);
+  case ISD::SRA_PARTS:
+  case ISD::SRL_PARTS:
+    return LowerShiftRightParts(Op, DAG);
+  case ISD::SELECT:
+    return LowerSelect(Op, DAG);
+  default:
+    llvm_unreachable("Custom lowering not defined for operation");
+  }
+}
+
+SDValue NVPTXTargetLowering::LowerSelect(SDValue Op, SelectionDAG &DAG) const {
+  // i1 selects are not directly representable; widen both arms to i32,
+  // perform the select there, and truncate the result back to i1.
+  SDValue Cond = Op->getOperand(0);
+  SDValue TrueArm = Op->getOperand(1);
+  SDValue FalseArm = Op->getOperand(2);
+  SDLoc DL(Op.getNode());
+
+  assert(Op.getValueType() == MVT::i1 && "Custom lowering enabled only for i1");
+
+  TrueArm = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, TrueArm);
+  FalseArm = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, FalseArm);
+  SDValue WideSel = DAG.getNode(ISD::SELECT, DL, MVT::i32, Cond, TrueArm,
+                                FalseArm);
+  return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, WideSel);
+}
+
+SDValue NVPTXTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
+  EVT VT = Op.getValueType();
+
+  // i1 loads need custom handling: loaded as a byte, truncated to i1.
+  if (VT == MVT::i1)
+    return LowerLOADi1(Op, DAG);
+
+  // v2f16 is legal, so the legalizer never sees it and unaligned v2f16
+  // loads must be expanded right here.
+  if (VT != MVT::v2f16)
+    return SDValue();
+
+  LoadSDNode *Load = cast<LoadSDNode>(Op);
+  EVT MemVT = Load->getMemoryVT();
+  if (allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), MemVT,
+                         Load->getAddressSpace(), Load->getAlignment()))
+    return SDValue();
+
+  // Access is not allowed at this alignment: split into smaller pieces.
+  SDValue Halves[2];
+  std::tie(Halves[0], Halves[1]) = expandUnalignedLoad(Load, DAG);
+  return DAG.getMergeValues(Halves, SDLoc(Op));
+}
+
+// v = ld i1* addr
+//   =>
+// v1 = ld i8* addr (-> i16)
+// v = trunc i16 to i1
+SDValue NVPTXTargetLowering::LowerLOADi1(SDValue Op, SelectionDAG &DAG) const {
+  LoadSDNode *LD = cast<LoadSDNode>(Op.getNode());
+  SDLoc DL(Op.getNode());
+  assert(LD->getExtensionType() == ISD::NON_EXTLOAD);
+  assert(Op.getNode()->getValueType(0) == MVT::i1 &&
+         "Custom lowering for i1 load only");
+  // Load the byte into an i16 register value, then truncate down to i1.
+  SDValue WideLoad =
+      DAG.getLoad(MVT::i16, DL, LD->getChain(), LD->getBasePtr(),
+                  LD->getPointerInfo(), LD->getAlignment(),
+                  LD->getMemOperand()->getFlags());
+  SDValue Truncated = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, WideLoad);
+  // The legalizer (the caller) expects two results (value + chain) from the
+  // legalized load, so package them in a MergeValues node. See
+  // ExpandUnalignedLoad() in LegalizeDAG.cpp which also uses MergeValues.
+  SDValue Results[] = { Truncated, LD->getChain() };
+  return DAG.getMergeValues(Results, DL);
+}
+
+SDValue NVPTXTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
+  StoreSDNode *Store = cast<StoreSDNode>(Op);
+  EVT MemVT = Store->getMemoryVT();
+
+  // i1 stores become a zero-extended i8 truncating store.
+  if (MemVT == MVT::i1)
+    return LowerSTOREi1(Op, DAG);
+
+  // v2f16 is legal, so the legalizer never splits it; expand unaligned
+  // v2f16 stores here instead.
+  if (MemVT == MVT::v2f16 &&
+      !allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), MemVT,
+                          Store->getAddressSpace(), Store->getAlignment()))
+    return expandUnalignedStore(Store, DAG);
+
+  // Other vector stores may map onto st.v2/st.v4.
+  if (MemVT.isVector())
+    return LowerSTOREVector(Op, DAG);
+
+  return SDValue();
+}
+
+SDValue
+NVPTXTargetLowering::LowerSTOREVector(SDValue Op, SelectionDAG &DAG) const {
+  // Lower a vector store into NVPTX StoreV2/StoreV4 target nodes when the
+  // type is "native" and sufficiently aligned; otherwise return an empty
+  // SDValue so the legalizer scalarizes (or re-splits) the store.
+  SDNode *N = Op.getNode();
+  SDValue Val = N->getOperand(1);
+  SDLoc DL(N);
+  EVT ValVT = Val.getValueType();
+
+  if (ValVT.isVector()) {
+    // We only handle "native" vector sizes for now, e.g. <4 x double> is not
+    // legal. We can (and should) split that into 2 stores of <2 x double> here
+    // but I'm leaving that as a TODO for now.
+    if (!ValVT.isSimple())
+      return SDValue();
+    switch (ValVT.getSimpleVT().SimpleTy) {
+    default:
+      return SDValue();
+    case MVT::v2i8:
+    case MVT::v2i16:
+    case MVT::v2i32:
+    case MVT::v2i64:
+    case MVT::v2f16:
+    case MVT::v2f32:
+    case MVT::v2f64:
+    case MVT::v4i8:
+    case MVT::v4i16:
+    case MVT::v4i32:
+    case MVT::v4f16:
+    case MVT::v4f32:
+    case MVT::v8f16: // <4 x f16x2>
+      // This is a "native" vector type
+      break;
+    }
+
+    MemSDNode *MemSD = cast<MemSDNode>(N);
+    const DataLayout &TD = DAG.getDataLayout();
+
+    unsigned Align = MemSD->getAlignment();
+    unsigned PrefAlign =
+        TD.getPrefTypeAlignment(ValVT.getTypeForEVT(*DAG.getContext()));
+    if (Align < PrefAlign) {
+      // This store is not sufficiently aligned, so bail out and let this vector
+      // store be scalarized. Note that we may still be able to emit smaller
+      // vector stores. For example, if we are storing a <4 x float> with an
+      // alignment of 8, this check will fail but the legalizer will try again
+      // with 2 x <2 x float>, which will succeed with an alignment of 8.
+      return SDValue();
+    }
+
+    unsigned Opcode = 0;
+    EVT EltVT = ValVT.getVectorElementType();
+    unsigned NumElts = ValVT.getVectorNumElements();
+
+    // Since StoreV2 is a target node, we cannot rely on DAG type legalization.
+    // Therefore, we must ensure the type is legal. For i1 and i8, we set the
+    // stored type to i16 and propagate the "real" type as the memory type.
+    bool NeedExt = false;
+    if (EltVT.getSizeInBits() < 16)
+      NeedExt = true;
+
+    bool StoreF16x2 = false;
+    switch (NumElts) {
+    default:
+      return SDValue();
+    case 2:
+      Opcode = NVPTXISD::StoreV2;
+      break;
+    case 4:
+      Opcode = NVPTXISD::StoreV4;
+      break;
+    case 8:
+      // v8f16 is a special case. PTX doesn't have st.v8.f16
+      // instruction. Instead, we split the vector into v2f16 chunks and
+      // store them with st.v4.b32.
+      assert(EltVT == MVT::f16 && "Wrong type for the vector.");
+      Opcode = NVPTXISD::StoreV4;
+      StoreF16x2 = true;
+      break;
+    }
+
+    SmallVector<SDValue, 8> Ops;
+
+    // First is the chain
+    Ops.push_back(N->getOperand(0));
+
+    if (StoreF16x2) {
+      // Combine f16,f16 -> v2f16
+      NumElts /= 2;
+      for (unsigned i = 0; i < NumElts; ++i) {
+        // Pair up consecutive scalars (2*i, 2*i+1) into one v2f16 operand.
+        SDValue E0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f16, Val,
+                                 DAG.getIntPtrConstant(i * 2, DL));
+        SDValue E1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f16, Val,
+                                 DAG.getIntPtrConstant(i * 2 + 1, DL));
+        SDValue V2 = DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v2f16, E0, E1);
+        Ops.push_back(V2);
+      }
+    } else {
+      // Then the split values
+      for (unsigned i = 0; i < NumElts; ++i) {
+        SDValue ExtVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Val,
+                                     DAG.getIntPtrConstant(i, DL));
+        if (NeedExt)
+          ExtVal = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i16, ExtVal);
+        Ops.push_back(ExtVal);
+      }
+    }
+
+    // Then any remaining arguments
+    Ops.append(N->op_begin() + 2, N->op_end());
+
+    SDValue NewSt =
+        DAG.getMemIntrinsicNode(Opcode, DL, DAG.getVTList(MVT::Other), Ops,
+                                MemSD->getMemoryVT(), MemSD->getMemOperand());
+
+    return NewSt;
+  }
+
+  return SDValue();
+}
+
+// st i1 v, addr
+//    =>
+// v1 = zxt v to i16
+// st.u8 i16, addr
+SDValue NVPTXTargetLowering::LowerSTOREi1(SDValue Op, SelectionDAG &DAG) const {
+  StoreSDNode *ST = cast<StoreSDNode>(Op.getNode());
+  SDLoc DL(Op.getNode());
+  SDValue Chain = ST->getChain();
+  SDValue BasePtr = ST->getBasePtr();
+  SDValue Value = ST->getValue();
+  assert(Value.getValueType() == MVT::i1 && "Custom lowering for i1 store only");
+  // Widen the i1 to an i16 register value, then emit a truncating byte store.
+  Value = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i16, Value);
+  return DAG.getTruncStore(Chain, DL, Value, BasePtr, ST->getPointerInfo(),
+                           MVT::i8, ST->getAlignment(),
+                           ST->getMemOperand()->getFlags());
+}
+
+SDValue
+NVPTXTargetLowering::getParamSymbol(SelectionDAG &DAG, int idx, EVT v) const {
+  // Build the PTX parameter symbol name "<function>_param_<idx>".
+  std::string SymName;
+  raw_string_ostream OS(SymName);
+  OS << DAG.getMachineFunction().getName() << "_param_" << idx;
+
+  // Intern the name in the target machine's string pool so the pointer
+  // handed to the TargetExternalSymbol node stays alive.
+  std::string *Interned =
+      nvTM->getManagedStrPool()->getManagedString(OS.str().c_str());
+  return DAG.getTargetExternalSymbol(Interned->c_str(), v);
+}
+
+// Check to see if the kernel argument is image*_t or sampler_t
+
+static bool isImageOrSamplerVal(const Value *arg, const Module *context) {
+  static const char *const specialTypes[] = { "struct._image2d_t",
+                                              "struct._image3d_t",
+                                              "struct._sampler_t" };
+
+  // Only a pointer-to-named-struct argument can be an image/sampler, and a
+  // module is required for the query to be meaningful.
+  auto *PTy = dyn_cast<PointerType>(arg->getType());
+  if (!PTy || !context)
+    return false;
+
+  auto *STy = dyn_cast<StructType>(PTy->getElementType());
+  if (!STy || STy->isLiteral())
+    return false;
+
+  // Match the struct's name against the known OpenCL special type names.
+  for (const char *Special : specialTypes)
+    if (STy->getName() == Special)
+      return true;
+  return false;
+}
+
+// Lower incoming formal arguments: each argument lives in the .param space
+// as a symbol "<func>_param_<n>"; we emit (possibly vectorized) loads from
+// those symbols and push the resulting values into InVals, keeping InVals
+// in one-to-one correspondence with Ins.
+SDValue NVPTXTargetLowering::LowerFormalArguments(
+    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
+    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
+    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
+  MachineFunction &MF = DAG.getMachineFunction();
+  const DataLayout &DL = DAG.getDataLayout();
+  auto PtrVT = getPointerTy(DAG.getDataLayout());
+
+  const Function *F = MF.getFunction();
+  const AttributeList &PAL = F->getAttributes();
+  const TargetLowering *TLI = STI.getTargetLowering();
+
+  SDValue Root = DAG.getRoot();
+  std::vector<SDValue> OutChains;
+
+  bool isABI = (STI.getSmVersion() >= 20);
+  assert(isABI && "Non-ABI compilation is not supported");
+  if (!isABI)
+    return Chain;
+
+  std::vector<Type *> argTypes;
+  std::vector<const Argument *> theArgs;
+  for (const Argument &I : F->args()) {
+    theArgs.push_back(&I);
+    argTypes.push_back(I.getType());
+  }
+  // argTypes.size() (or theArgs.size()) and Ins.size() need not match.
+  // Ins.size() will be larger
+  //   * if there is an aggregate argument with multiple fields (each field
+  //     showing up separately in Ins)
+  //   * if there is a vector argument with more than typical vector-length
+  //     elements (generally if more than 4) where each vector element is
+  //     individually present in Ins.
+  // So a different index should be used for indexing into Ins.
+  // See similar issue in LowerCall.
+  unsigned InsIdx = 0;
+
+  int idx = 0;
+  for (unsigned i = 0, e = theArgs.size(); i != e; ++i, ++idx, ++InsIdx) {
+    Type *Ty = argTypes[i];
+
+    // If the kernel argument is image*_t or sampler_t, convert it to
+    // a i32 constant holding the parameter position. This can later
+    // be matched in the AsmPrinter to output the correct mangled name.
+    if (isImageOrSamplerVal(
+            theArgs[i],
+            (theArgs[i]->getParent() ? theArgs[i]->getParent()->getParent()
+                                     : nullptr))) {
+      assert(isKernelFunction(*F) &&
+             "Only kernels can have image/sampler params");
+      InVals.push_back(DAG.getConstant(i + 1, dl, MVT::i32));
+      continue;
+    }
+
+    if (theArgs[i]->use_empty()) {
+      // argument is dead
+      if (Ty->isAggregateType()) {
+        SmallVector<EVT, 16> vtparts;
+
+        ComputePTXValueVTs(*this, DAG.getDataLayout(), Ty, vtparts);
+        assert(vtparts.size() > 0 && "empty aggregate type not expected");
+        // Emit one UNDEF per decomposed part so InVals stays in sync with Ins.
+        for (unsigned parti = 0, parte = vtparts.size(); parti != parte;
+             ++parti) {
+          InVals.push_back(DAG.getNode(ISD::UNDEF, dl, Ins[InsIdx].VT));
+          ++InsIdx;
+        }
+        if (vtparts.size() > 0)
+          --InsIdx;
+        continue;
+      }
+      if (Ty->isVectorTy()) {
+        EVT ObjectVT = getValueType(DL, Ty);
+        unsigned NumRegs = TLI->getNumRegisters(F->getContext(), ObjectVT);
+        for (unsigned parti = 0; parti < NumRegs; ++parti) {
+          InVals.push_back(DAG.getNode(ISD::UNDEF, dl, Ins[InsIdx].VT));
+          ++InsIdx;
+        }
+        if (NumRegs > 0)
+          --InsIdx;
+        continue;
+      }
+      InVals.push_back(DAG.getNode(ISD::UNDEF, dl, Ins[InsIdx].VT));
+      continue;
+    }
+
+    // In the following cases, assign a node order of "idx+1"
+    // to newly created nodes. The SDNodes for params have to
+    // appear in the same order as their order of appearance
+    // in the original function. "idx+1" holds that order.
+    if (!PAL.hasParamAttribute(i, Attribute::ByVal)) {
+      bool aggregateIsPacked = false;
+      if (StructType *STy = dyn_cast<StructType>(Ty))
+        aggregateIsPacked = STy->isPacked();
+
+      SmallVector<EVT, 16> VTs;
+      SmallVector<uint64_t, 16> Offsets;
+      ComputePTXValueVTs(*this, DL, Ty, VTs, &Offsets, 0);
+      assert(VTs.size() > 0 && "Unexpected empty type.");
+      auto VectorInfo =
+          VectorizePTXValueVTs(VTs, Offsets, DL.getABITypeAlignment(Ty));
+
+      SDValue Arg = getParamSymbol(DAG, idx, PtrVT);
+      int VecIdx = -1; // Index of the first element of the current vector.
+      for (unsigned parti = 0, parte = VTs.size(); parti != parte; ++parti) {
+        if (VectorInfo[parti] & PVF_FIRST) {
+          assert(VecIdx == -1 && "Orphaned vector.");
+          VecIdx = parti;
+        }
+
+        // That's the last element of this store op.
+        if (VectorInfo[parti] & PVF_LAST) {
+          unsigned NumElts = parti - VecIdx + 1;
+          EVT EltVT = VTs[parti];
+          // i1 is loaded/stored as i8.
+          EVT LoadVT = EltVT;
+          if (EltVT == MVT::i1)
+            LoadVT = MVT::i8;
+          else if (EltVT == MVT::v2f16)
+            // getLoad needs a vector type, but it can't handle
+            // vectors which contain v2f16 elements. So we must load
+            // using i32 here and then bitcast back.
+            LoadVT = MVT::i32;
+
+          // Load all NumElts elements of this run with one vector load from
+          // the param symbol at the run's starting offset.
+          EVT VecVT = EVT::getVectorVT(F->getContext(), LoadVT, NumElts);
+          SDValue VecAddr =
+              DAG.getNode(ISD::ADD, dl, PtrVT, Arg,
+                          DAG.getConstant(Offsets[VecIdx], dl, PtrVT));
+          Value *srcValue = Constant::getNullValue(PointerType::get(
+              EltVT.getTypeForEVT(F->getContext()), ADDRESS_SPACE_PARAM));
+          SDValue P =
+              DAG.getLoad(VecVT, dl, Root, VecAddr,
+                          MachinePointerInfo(srcValue), aggregateIsPacked,
+                          MachineMemOperand::MODereferenceable |
+                              MachineMemOperand::MOInvariant);
+          if (P.getNode())
+            P.getNode()->setIROrder(idx + 1);
+          for (unsigned j = 0; j < NumElts; ++j) {
+            SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, LoadVT, P,
+                                      DAG.getIntPtrConstant(j, dl));
+            // We've loaded i1 as an i8 and now must truncate it back to i1
+            if (EltVT == MVT::i1)
+              Elt = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Elt);
+            // v2f16 was loaded as an i32. Now we must bitcast it back.
+            else if (EltVT == MVT::v2f16)
+              Elt = DAG.getNode(ISD::BITCAST, dl, MVT::v2f16, Elt);
+            // Extend the element if necessary (e.g. an i8 is loaded
+            // into an i16 register)
+            if (Ins[InsIdx].VT.isInteger() &&
+                Ins[InsIdx].VT.getSizeInBits() > LoadVT.getSizeInBits()) {
+              unsigned Extend = Ins[InsIdx].Flags.isSExt() ? ISD::SIGN_EXTEND
+                                                           : ISD::ZERO_EXTEND;
+              Elt = DAG.getNode(Extend, dl, Ins[InsIdx].VT, Elt);
+            }
+            InVals.push_back(Elt);
+          }
+
+          // Reset vector tracking state.
+          VecIdx = -1;
+        }
+        ++InsIdx;
+      }
+      if (VTs.size() > 0)
+        --InsIdx;
+      continue;
+    }
+
+    // Param has ByVal attribute
+    // Return MoveParam(param symbol).
+    // Ideally, the param symbol can be returned directly,
+    // but when SDNode builder decides to use it in a CopyToReg(),
+    // machine instruction fails because TargetExternalSymbol
+    // (not lowered) is target dependent, and CopyToReg assumes
+    // the source is lowered.
+    EVT ObjectVT = getValueType(DL, Ty);
+    assert(ObjectVT == Ins[InsIdx].VT &&
+           "Ins type did not match function type");
+    SDValue Arg = getParamSymbol(DAG, idx, PtrVT);
+    SDValue p = DAG.getNode(NVPTXISD::MoveParam, dl, ObjectVT, Arg);
+    if (p.getNode())
+      p.getNode()->setIROrder(idx + 1);
+    InVals.push_back(p);
+  }
+
+  // Clang will check explicit VarArg and issue error if any. However, Clang
+  // will let code with
+  // implicit var arg like f() pass. See bug 617733.
+  // We treat this case as if the arg list is empty.
+  // if (F.isVarArg()) {
+  // assert(0 && "VarArg not supported yet!");
+  //}
+
+  if (!OutChains.empty())
+    DAG.setRoot(DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains));
+
+  return Chain;
+}
+
+// Lower a function return by storing the return value(s) into the retval0
+// parameter space with StoreRetval{,V2,V4} target nodes (vectorized where
+// VectorizePTXValueVTs allows), then emitting RET_FLAG.
+SDValue
+NVPTXTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
+                                 bool isVarArg,
+                                 const SmallVectorImpl<ISD::OutputArg> &Outs,
+                                 const SmallVectorImpl<SDValue> &OutVals,
+                                 const SDLoc &dl, SelectionDAG &DAG) const {
+  MachineFunction &MF = DAG.getMachineFunction();
+  Type *RetTy = MF.getFunction()->getReturnType();
+
+  bool isABI = (STI.getSmVersion() >= 20);
+  assert(isABI && "Non-ABI compilation is not supported");
+  if (!isABI)
+    return Chain;
+
+  // Bind by const reference; getDataLayout() returns a reference and there
+  // is no reason to copy the DataLayout (the rest of this file binds &DL).
+  const DataLayout &DL = DAG.getDataLayout();
+  SmallVector<EVT, 16> VTs;
+  SmallVector<uint64_t, 16> Offsets;
+  ComputePTXValueVTs(*this, DL, RetTy, VTs, &Offsets);
+  assert(VTs.size() == OutVals.size() && "Bad return value decomposition");
+
+  auto VectorInfo = VectorizePTXValueVTs(
+      VTs, Offsets, RetTy->isSized() ? DL.getABITypeAlignment(RetTy) : 1);
+
+  // PTX Interoperability Guide 3.3(A): [Integer] Values shorter than
+  // 32-bits are sign extended or zero extended, depending on whether
+  // they are signed or unsigned types.
+  bool ExtendIntegerRetVal =
+      RetTy->isIntegerTy() && DL.getTypeAllocSizeInBits(RetTy) < 32;
+
+  SmallVector<SDValue, 6> StoreOperands;
+  for (unsigned i = 0, e = VTs.size(); i != e; ++i) {
+    // New load/store. Record chain and offset operands.
+    if (VectorInfo[i] & PVF_FIRST) {
+      assert(StoreOperands.empty() && "Orphaned operand list.");
+      StoreOperands.push_back(Chain);
+      StoreOperands.push_back(DAG.getConstant(Offsets[i], dl, MVT::i32));
+    }
+
+    SDValue RetVal = OutVals[i];
+    if (ExtendIntegerRetVal) {
+      RetVal = DAG.getNode(Outs[i].Flags.isSExt() ? ISD::SIGN_EXTEND
+                                                  : ISD::ZERO_EXTEND,
+                           dl, MVT::i32, RetVal);
+    } else if (RetVal.getValueSizeInBits() < 16) {
+      // Use 16-bit registers for small load-stores as it's the
+      // smallest general purpose register size supported by NVPTX.
+      RetVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, RetVal);
+    }
+
+    // Record the value to return.
+    StoreOperands.push_back(RetVal);
+
+    // That's the last element of this store op.
+    if (VectorInfo[i] & PVF_LAST) {
+      NVPTXISD::NodeType Op;
+      // StoreOperands = [Chain, Offset, Val...]; element count is size - 2.
+      unsigned NumElts = StoreOperands.size() - 2;
+      switch (NumElts) {
+      case 1:
+        Op = NVPTXISD::StoreRetval;
+        break;
+      case 2:
+        Op = NVPTXISD::StoreRetvalV2;
+        break;
+      case 4:
+        Op = NVPTXISD::StoreRetvalV4;
+        break;
+      default:
+        llvm_unreachable("Invalid vector info.");
+      }
+
+      // Adjust type of load/store op if we've extended the scalar
+      // return value.
+      EVT TheStoreType = ExtendIntegerRetVal ? MVT::i32 : VTs[i];
+      Chain = DAG.getMemIntrinsicNode(Op, dl, DAG.getVTList(MVT::Other),
+                                      StoreOperands, TheStoreType,
+                                      MachinePointerInfo(), 1);
+      // Cleanup vector state.
+      StoreOperands.clear();
+    }
+  }
+
+  return DAG.getNode(NVPTXISD::RET_FLAG, dl, MVT::Other, Chain);
+}
+
+// Lower an inline-asm operand for a given constraint string.
+// NVPTX only supports the generic single-character constraints; any
+// multi-character constraint is left alone and contributes no operands.
+void NVPTXTargetLowering::LowerAsmOperandForConstraint(
+    SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
+    SelectionDAG &DAG) const {
+  // Defer single-character constraints to the target-independent handling.
+  if (Constraint.length() <= 1)
+    TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
+}
+
+// Map an NVVM texture-fetch (tex/tld4) intrinsic ID to the matching
+// NVPTXISD texture opcode. Returns 0 when the intrinsic is not a texture
+// fetch, so callers can use a zero result as "not handled here".
+// Naming convention visible in both spaces: geometry (1D/2D/3D, Array,
+// Cube), result element type (Float/S32/U32), index type, and an optional
+// Level/Grad suffix for explicit-LOD / gradient variants. The "unified"
+// group covers the unified texture+sampler forms of the same fetches.
+static unsigned getOpcForTextureInstr(unsigned Intrinsic) {
+  switch (Intrinsic) {
+  default:
+    return 0;
+
+  // 1D textures.
+  case Intrinsic::nvvm_tex_1d_v4f32_s32:
+    return NVPTXISD::Tex1DFloatS32;
+  case Intrinsic::nvvm_tex_1d_v4f32_f32:
+    return NVPTXISD::Tex1DFloatFloat;
+  case Intrinsic::nvvm_tex_1d_level_v4f32_f32:
+    return NVPTXISD::Tex1DFloatFloatLevel;
+  case Intrinsic::nvvm_tex_1d_grad_v4f32_f32:
+    return NVPTXISD::Tex1DFloatFloatGrad;
+  case Intrinsic::nvvm_tex_1d_v4s32_s32:
+    return NVPTXISD::Tex1DS32S32;
+  case Intrinsic::nvvm_tex_1d_v4s32_f32:
+    return NVPTXISD::Tex1DS32Float;
+  case Intrinsic::nvvm_tex_1d_level_v4s32_f32:
+    return NVPTXISD::Tex1DS32FloatLevel;
+  case Intrinsic::nvvm_tex_1d_grad_v4s32_f32:
+    return NVPTXISD::Tex1DS32FloatGrad;
+  case Intrinsic::nvvm_tex_1d_v4u32_s32:
+    return NVPTXISD::Tex1DU32S32;
+  case Intrinsic::nvvm_tex_1d_v4u32_f32:
+    return NVPTXISD::Tex1DU32Float;
+  case Intrinsic::nvvm_tex_1d_level_v4u32_f32:
+    return NVPTXISD::Tex1DU32FloatLevel;
+  case Intrinsic::nvvm_tex_1d_grad_v4u32_f32:
+    return NVPTXISD::Tex1DU32FloatGrad;
+
+  // 1D array textures.
+  case Intrinsic::nvvm_tex_1d_array_v4f32_s32:
+    return NVPTXISD::Tex1DArrayFloatS32;
+  case Intrinsic::nvvm_tex_1d_array_v4f32_f32:
+    return NVPTXISD::Tex1DArrayFloatFloat;
+  case Intrinsic::nvvm_tex_1d_array_level_v4f32_f32:
+    return NVPTXISD::Tex1DArrayFloatFloatLevel;
+  case Intrinsic::nvvm_tex_1d_array_grad_v4f32_f32:
+    return NVPTXISD::Tex1DArrayFloatFloatGrad;
+  case Intrinsic::nvvm_tex_1d_array_v4s32_s32:
+    return NVPTXISD::Tex1DArrayS32S32;
+  case Intrinsic::nvvm_tex_1d_array_v4s32_f32:
+    return NVPTXISD::Tex1DArrayS32Float;
+  case Intrinsic::nvvm_tex_1d_array_level_v4s32_f32:
+    return NVPTXISD::Tex1DArrayS32FloatLevel;
+  case Intrinsic::nvvm_tex_1d_array_grad_v4s32_f32:
+    return NVPTXISD::Tex1DArrayS32FloatGrad;
+  case Intrinsic::nvvm_tex_1d_array_v4u32_s32:
+    return NVPTXISD::Tex1DArrayU32S32;
+  case Intrinsic::nvvm_tex_1d_array_v4u32_f32:
+    return NVPTXISD::Tex1DArrayU32Float;
+  case Intrinsic::nvvm_tex_1d_array_level_v4u32_f32:
+    return NVPTXISD::Tex1DArrayU32FloatLevel;
+  case Intrinsic::nvvm_tex_1d_array_grad_v4u32_f32:
+    return NVPTXISD::Tex1DArrayU32FloatGrad;
+
+  // 2D textures.
+  case Intrinsic::nvvm_tex_2d_v4f32_s32:
+    return NVPTXISD::Tex2DFloatS32;
+  case Intrinsic::nvvm_tex_2d_v4f32_f32:
+    return NVPTXISD::Tex2DFloatFloat;
+  case Intrinsic::nvvm_tex_2d_level_v4f32_f32:
+    return NVPTXISD::Tex2DFloatFloatLevel;
+  case Intrinsic::nvvm_tex_2d_grad_v4f32_f32:
+    return NVPTXISD::Tex2DFloatFloatGrad;
+  case Intrinsic::nvvm_tex_2d_v4s32_s32:
+    return NVPTXISD::Tex2DS32S32;
+  case Intrinsic::nvvm_tex_2d_v4s32_f32:
+    return NVPTXISD::Tex2DS32Float;
+  case Intrinsic::nvvm_tex_2d_level_v4s32_f32:
+    return NVPTXISD::Tex2DS32FloatLevel;
+  case Intrinsic::nvvm_tex_2d_grad_v4s32_f32:
+    return NVPTXISD::Tex2DS32FloatGrad;
+  case Intrinsic::nvvm_tex_2d_v4u32_s32:
+    return NVPTXISD::Tex2DU32S32;
+  case Intrinsic::nvvm_tex_2d_v4u32_f32:
+    return NVPTXISD::Tex2DU32Float;
+  case Intrinsic::nvvm_tex_2d_level_v4u32_f32:
+    return NVPTXISD::Tex2DU32FloatLevel;
+  case Intrinsic::nvvm_tex_2d_grad_v4u32_f32:
+    return NVPTXISD::Tex2DU32FloatGrad;
+
+  // 2D array textures.
+  case Intrinsic::nvvm_tex_2d_array_v4f32_s32:
+    return NVPTXISD::Tex2DArrayFloatS32;
+  case Intrinsic::nvvm_tex_2d_array_v4f32_f32:
+    return NVPTXISD::Tex2DArrayFloatFloat;
+  case Intrinsic::nvvm_tex_2d_array_level_v4f32_f32:
+    return NVPTXISD::Tex2DArrayFloatFloatLevel;
+  case Intrinsic::nvvm_tex_2d_array_grad_v4f32_f32:
+    return NVPTXISD::Tex2DArrayFloatFloatGrad;
+  case Intrinsic::nvvm_tex_2d_array_v4s32_s32:
+    return NVPTXISD::Tex2DArrayS32S32;
+  case Intrinsic::nvvm_tex_2d_array_v4s32_f32:
+    return NVPTXISD::Tex2DArrayS32Float;
+  case Intrinsic::nvvm_tex_2d_array_level_v4s32_f32:
+    return NVPTXISD::Tex2DArrayS32FloatLevel;
+  case Intrinsic::nvvm_tex_2d_array_grad_v4s32_f32:
+    return NVPTXISD::Tex2DArrayS32FloatGrad;
+  case Intrinsic::nvvm_tex_2d_array_v4u32_s32:
+    return NVPTXISD::Tex2DArrayU32S32;
+  case Intrinsic::nvvm_tex_2d_array_v4u32_f32:
+    return NVPTXISD::Tex2DArrayU32Float;
+  case Intrinsic::nvvm_tex_2d_array_level_v4u32_f32:
+    return NVPTXISD::Tex2DArrayU32FloatLevel;
+  case Intrinsic::nvvm_tex_2d_array_grad_v4u32_f32:
+    return NVPTXISD::Tex2DArrayU32FloatGrad;
+
+  // 3D textures.
+  case Intrinsic::nvvm_tex_3d_v4f32_s32:
+    return NVPTXISD::Tex3DFloatS32;
+  case Intrinsic::nvvm_tex_3d_v4f32_f32:
+    return NVPTXISD::Tex3DFloatFloat;
+  case Intrinsic::nvvm_tex_3d_level_v4f32_f32:
+    return NVPTXISD::Tex3DFloatFloatLevel;
+  case Intrinsic::nvvm_tex_3d_grad_v4f32_f32:
+    return NVPTXISD::Tex3DFloatFloatGrad;
+  case Intrinsic::nvvm_tex_3d_v4s32_s32:
+    return NVPTXISD::Tex3DS32S32;
+  case Intrinsic::nvvm_tex_3d_v4s32_f32:
+    return NVPTXISD::Tex3DS32Float;
+  case Intrinsic::nvvm_tex_3d_level_v4s32_f32:
+    return NVPTXISD::Tex3DS32FloatLevel;
+  case Intrinsic::nvvm_tex_3d_grad_v4s32_f32:
+    return NVPTXISD::Tex3DS32FloatGrad;
+  case Intrinsic::nvvm_tex_3d_v4u32_s32:
+    return NVPTXISD::Tex3DU32S32;
+  case Intrinsic::nvvm_tex_3d_v4u32_f32:
+    return NVPTXISD::Tex3DU32Float;
+  case Intrinsic::nvvm_tex_3d_level_v4u32_f32:
+    return NVPTXISD::Tex3DU32FloatLevel;
+  case Intrinsic::nvvm_tex_3d_grad_v4u32_f32:
+    return NVPTXISD::Tex3DU32FloatGrad;
+
+  // Cube textures (no grad variants; float coordinates only).
+  case Intrinsic::nvvm_tex_cube_v4f32_f32:
+    return NVPTXISD::TexCubeFloatFloat;
+  case Intrinsic::nvvm_tex_cube_level_v4f32_f32:
+    return NVPTXISD::TexCubeFloatFloatLevel;
+  case Intrinsic::nvvm_tex_cube_v4s32_f32:
+    return NVPTXISD::TexCubeS32Float;
+  case Intrinsic::nvvm_tex_cube_level_v4s32_f32:
+    return NVPTXISD::TexCubeS32FloatLevel;
+  case Intrinsic::nvvm_tex_cube_v4u32_f32:
+    return NVPTXISD::TexCubeU32Float;
+  case Intrinsic::nvvm_tex_cube_level_v4u32_f32:
+    return NVPTXISD::TexCubeU32FloatLevel;
+
+  // Cube array textures.
+  case Intrinsic::nvvm_tex_cube_array_v4f32_f32:
+    return NVPTXISD::TexCubeArrayFloatFloat;
+  case Intrinsic::nvvm_tex_cube_array_level_v4f32_f32:
+    return NVPTXISD::TexCubeArrayFloatFloatLevel;
+  case Intrinsic::nvvm_tex_cube_array_v4s32_f32:
+    return NVPTXISD::TexCubeArrayS32Float;
+  case Intrinsic::nvvm_tex_cube_array_level_v4s32_f32:
+    return NVPTXISD::TexCubeArrayS32FloatLevel;
+  case Intrinsic::nvvm_tex_cube_array_v4u32_f32:
+    return NVPTXISD::TexCubeArrayU32Float;
+  case Intrinsic::nvvm_tex_cube_array_level_v4u32_f32:
+    return NVPTXISD::TexCubeArrayU32FloatLevel;
+
+  // tld4 (texture gather): r/g/b/a select the gathered component.
+  case Intrinsic::nvvm_tld4_r_2d_v4f32_f32:
+    return NVPTXISD::Tld4R2DFloatFloat;
+  case Intrinsic::nvvm_tld4_g_2d_v4f32_f32:
+    return NVPTXISD::Tld4G2DFloatFloat;
+  case Intrinsic::nvvm_tld4_b_2d_v4f32_f32:
+    return NVPTXISD::Tld4B2DFloatFloat;
+  case Intrinsic::nvvm_tld4_a_2d_v4f32_f32:
+    return NVPTXISD::Tld4A2DFloatFloat;
+  case Intrinsic::nvvm_tld4_r_2d_v4s32_f32:
+    return NVPTXISD::Tld4R2DS64Float;
+  case Intrinsic::nvvm_tld4_g_2d_v4s32_f32:
+    return NVPTXISD::Tld4G2DS64Float;
+  case Intrinsic::nvvm_tld4_b_2d_v4s32_f32:
+    return NVPTXISD::Tld4B2DS64Float;
+  case Intrinsic::nvvm_tld4_a_2d_v4s32_f32:
+    return NVPTXISD::Tld4A2DS64Float;
+  case Intrinsic::nvvm_tld4_r_2d_v4u32_f32:
+    return NVPTXISD::Tld4R2DU64Float;
+  case Intrinsic::nvvm_tld4_g_2d_v4u32_f32:
+    return NVPTXISD::Tld4G2DU64Float;
+  case Intrinsic::nvvm_tld4_b_2d_v4u32_f32:
+    return NVPTXISD::Tld4B2DU64Float;
+  case Intrinsic::nvvm_tld4_a_2d_v4u32_f32:
+    return NVPTXISD::Tld4A2DU64Float;
+
+  // Unified-mode (texture handle carries its own sampler) variants below.
+  case Intrinsic::nvvm_tex_unified_1d_v4f32_s32:
+    return NVPTXISD::TexUnified1DFloatS32;
+  case Intrinsic::nvvm_tex_unified_1d_v4f32_f32:
+    return NVPTXISD::TexUnified1DFloatFloat;
+  case Intrinsic::nvvm_tex_unified_1d_level_v4f32_f32:
+    return NVPTXISD::TexUnified1DFloatFloatLevel;
+  case Intrinsic::nvvm_tex_unified_1d_grad_v4f32_f32:
+    return NVPTXISD::TexUnified1DFloatFloatGrad;
+  case Intrinsic::nvvm_tex_unified_1d_v4s32_s32:
+    return NVPTXISD::TexUnified1DS32S32;
+  case Intrinsic::nvvm_tex_unified_1d_v4s32_f32:
+    return NVPTXISD::TexUnified1DS32Float;
+  case Intrinsic::nvvm_tex_unified_1d_level_v4s32_f32:
+    return NVPTXISD::TexUnified1DS32FloatLevel;
+  case Intrinsic::nvvm_tex_unified_1d_grad_v4s32_f32:
+    return NVPTXISD::TexUnified1DS32FloatGrad;
+  case Intrinsic::nvvm_tex_unified_1d_v4u32_s32:
+    return NVPTXISD::TexUnified1DU32S32;
+  case Intrinsic::nvvm_tex_unified_1d_v4u32_f32:
+    return NVPTXISD::TexUnified1DU32Float;
+  case Intrinsic::nvvm_tex_unified_1d_level_v4u32_f32:
+    return NVPTXISD::TexUnified1DU32FloatLevel;
+  case Intrinsic::nvvm_tex_unified_1d_grad_v4u32_f32:
+    return NVPTXISD::TexUnified1DU32FloatGrad;
+
+  case Intrinsic::nvvm_tex_unified_1d_array_v4f32_s32:
+    return NVPTXISD::TexUnified1DArrayFloatS32;
+  case Intrinsic::nvvm_tex_unified_1d_array_v4f32_f32:
+    return NVPTXISD::TexUnified1DArrayFloatFloat;
+  case Intrinsic::nvvm_tex_unified_1d_array_level_v4f32_f32:
+    return NVPTXISD::TexUnified1DArrayFloatFloatLevel;
+  case Intrinsic::nvvm_tex_unified_1d_array_grad_v4f32_f32:
+    return NVPTXISD::TexUnified1DArrayFloatFloatGrad;
+  case Intrinsic::nvvm_tex_unified_1d_array_v4s32_s32:
+    return NVPTXISD::TexUnified1DArrayS32S32;
+  case Intrinsic::nvvm_tex_unified_1d_array_v4s32_f32:
+    return NVPTXISD::TexUnified1DArrayS32Float;
+  case Intrinsic::nvvm_tex_unified_1d_array_level_v4s32_f32:
+    return NVPTXISD::TexUnified1DArrayS32FloatLevel;
+  case Intrinsic::nvvm_tex_unified_1d_array_grad_v4s32_f32:
+    return NVPTXISD::TexUnified1DArrayS32FloatGrad;
+  case Intrinsic::nvvm_tex_unified_1d_array_v4u32_s32:
+    return NVPTXISD::TexUnified1DArrayU32S32;
+  case Intrinsic::nvvm_tex_unified_1d_array_v4u32_f32:
+    return NVPTXISD::TexUnified1DArrayU32Float;
+  case Intrinsic::nvvm_tex_unified_1d_array_level_v4u32_f32:
+    return NVPTXISD::TexUnified1DArrayU32FloatLevel;
+  case Intrinsic::nvvm_tex_unified_1d_array_grad_v4u32_f32:
+    return NVPTXISD::TexUnified1DArrayU32FloatGrad;
+
+  case Intrinsic::nvvm_tex_unified_2d_v4f32_s32:
+    return NVPTXISD::TexUnified2DFloatS32;
+  case Intrinsic::nvvm_tex_unified_2d_v4f32_f32:
+    return NVPTXISD::TexUnified2DFloatFloat;
+  case Intrinsic::nvvm_tex_unified_2d_level_v4f32_f32:
+    return NVPTXISD::TexUnified2DFloatFloatLevel;
+  case Intrinsic::nvvm_tex_unified_2d_grad_v4f32_f32:
+    return NVPTXISD::TexUnified2DFloatFloatGrad;
+  case Intrinsic::nvvm_tex_unified_2d_v4s32_s32:
+    return NVPTXISD::TexUnified2DS32S32;
+  case Intrinsic::nvvm_tex_unified_2d_v4s32_f32:
+    return NVPTXISD::TexUnified2DS32Float;
+  case Intrinsic::nvvm_tex_unified_2d_level_v4s32_f32:
+    return NVPTXISD::TexUnified2DS32FloatLevel;
+  case Intrinsic::nvvm_tex_unified_2d_grad_v4s32_f32:
+    return NVPTXISD::TexUnified2DS32FloatGrad;
+  case Intrinsic::nvvm_tex_unified_2d_v4u32_s32:
+    return NVPTXISD::TexUnified2DU32S32;
+  case Intrinsic::nvvm_tex_unified_2d_v4u32_f32:
+    return NVPTXISD::TexUnified2DU32Float;
+  case Intrinsic::nvvm_tex_unified_2d_level_v4u32_f32:
+    return NVPTXISD::TexUnified2DU32FloatLevel;
+  case Intrinsic::nvvm_tex_unified_2d_grad_v4u32_f32:
+    return NVPTXISD::TexUnified2DU32FloatGrad;
+
+  case Intrinsic::nvvm_tex_unified_2d_array_v4f32_s32:
+    return NVPTXISD::TexUnified2DArrayFloatS32;
+  case Intrinsic::nvvm_tex_unified_2d_array_v4f32_f32:
+    return NVPTXISD::TexUnified2DArrayFloatFloat;
+  case Intrinsic::nvvm_tex_unified_2d_array_level_v4f32_f32:
+    return NVPTXISD::TexUnified2DArrayFloatFloatLevel;
+  case Intrinsic::nvvm_tex_unified_2d_array_grad_v4f32_f32:
+    return NVPTXISD::TexUnified2DArrayFloatFloatGrad;
+  case Intrinsic::nvvm_tex_unified_2d_array_v4s32_s32:
+    return NVPTXISD::TexUnified2DArrayS32S32;
+  case Intrinsic::nvvm_tex_unified_2d_array_v4s32_f32:
+    return NVPTXISD::TexUnified2DArrayS32Float;
+  case Intrinsic::nvvm_tex_unified_2d_array_level_v4s32_f32:
+    return NVPTXISD::TexUnified2DArrayS32FloatLevel;
+  case Intrinsic::nvvm_tex_unified_2d_array_grad_v4s32_f32:
+    return NVPTXISD::TexUnified2DArrayS32FloatGrad;
+  case Intrinsic::nvvm_tex_unified_2d_array_v4u32_s32:
+    return NVPTXISD::TexUnified2DArrayU32S32;
+  case Intrinsic::nvvm_tex_unified_2d_array_v4u32_f32:
+    return NVPTXISD::TexUnified2DArrayU32Float;
+  case Intrinsic::nvvm_tex_unified_2d_array_level_v4u32_f32:
+    return NVPTXISD::TexUnified2DArrayU32FloatLevel;
+  case Intrinsic::nvvm_tex_unified_2d_array_grad_v4u32_f32:
+    return NVPTXISD::TexUnified2DArrayU32FloatGrad;
+
+  case Intrinsic::nvvm_tex_unified_3d_v4f32_s32:
+    return NVPTXISD::TexUnified3DFloatS32;
+  case Intrinsic::nvvm_tex_unified_3d_v4f32_f32:
+    return NVPTXISD::TexUnified3DFloatFloat;
+  case Intrinsic::nvvm_tex_unified_3d_level_v4f32_f32:
+    return NVPTXISD::TexUnified3DFloatFloatLevel;
+  case Intrinsic::nvvm_tex_unified_3d_grad_v4f32_f32:
+    return NVPTXISD::TexUnified3DFloatFloatGrad;
+  case Intrinsic::nvvm_tex_unified_3d_v4s32_s32:
+    return NVPTXISD::TexUnified3DS32S32;
+  case Intrinsic::nvvm_tex_unified_3d_v4s32_f32:
+    return NVPTXISD::TexUnified3DS32Float;
+  case Intrinsic::nvvm_tex_unified_3d_level_v4s32_f32:
+    return NVPTXISD::TexUnified3DS32FloatLevel;
+  case Intrinsic::nvvm_tex_unified_3d_grad_v4s32_f32:
+    return NVPTXISD::TexUnified3DS32FloatGrad;
+  case Intrinsic::nvvm_tex_unified_3d_v4u32_s32:
+    return NVPTXISD::TexUnified3DU32S32;
+  case Intrinsic::nvvm_tex_unified_3d_v4u32_f32:
+    return NVPTXISD::TexUnified3DU32Float;
+  case Intrinsic::nvvm_tex_unified_3d_level_v4u32_f32:
+    return NVPTXISD::TexUnified3DU32FloatLevel;
+  case Intrinsic::nvvm_tex_unified_3d_grad_v4u32_f32:
+    return NVPTXISD::TexUnified3DU32FloatGrad;
+
+  case Intrinsic::nvvm_tex_unified_cube_v4f32_f32:
+    return NVPTXISD::TexUnifiedCubeFloatFloat;
+  case Intrinsic::nvvm_tex_unified_cube_level_v4f32_f32:
+    return NVPTXISD::TexUnifiedCubeFloatFloatLevel;
+  case Intrinsic::nvvm_tex_unified_cube_v4s32_f32:
+    return NVPTXISD::TexUnifiedCubeS32Float;
+  case Intrinsic::nvvm_tex_unified_cube_level_v4s32_f32:
+    return NVPTXISD::TexUnifiedCubeS32FloatLevel;
+  case Intrinsic::nvvm_tex_unified_cube_v4u32_f32:
+    return NVPTXISD::TexUnifiedCubeU32Float;
+  case Intrinsic::nvvm_tex_unified_cube_level_v4u32_f32:
+    return NVPTXISD::TexUnifiedCubeU32FloatLevel;
+
+  case Intrinsic::nvvm_tex_unified_cube_array_v4f32_f32:
+    return NVPTXISD::TexUnifiedCubeArrayFloatFloat;
+  case Intrinsic::nvvm_tex_unified_cube_array_level_v4f32_f32:
+    return NVPTXISD::TexUnifiedCubeArrayFloatFloatLevel;
+  case Intrinsic::nvvm_tex_unified_cube_array_v4s32_f32:
+    return NVPTXISD::TexUnifiedCubeArrayS32Float;
+  case Intrinsic::nvvm_tex_unified_cube_array_level_v4s32_f32:
+    return NVPTXISD::TexUnifiedCubeArrayS32FloatLevel;
+  case Intrinsic::nvvm_tex_unified_cube_array_v4u32_f32:
+    return NVPTXISD::TexUnifiedCubeArrayU32Float;
+  case Intrinsic::nvvm_tex_unified_cube_array_level_v4u32_f32:
+    return NVPTXISD::TexUnifiedCubeArrayU32FloatLevel;
+
+  case Intrinsic::nvvm_tld4_unified_r_2d_v4f32_f32:
+    return NVPTXISD::Tld4UnifiedR2DFloatFloat;
+  case Intrinsic::nvvm_tld4_unified_g_2d_v4f32_f32:
+    return NVPTXISD::Tld4UnifiedG2DFloatFloat;
+  case Intrinsic::nvvm_tld4_unified_b_2d_v4f32_f32:
+    return NVPTXISD::Tld4UnifiedB2DFloatFloat;
+  case Intrinsic::nvvm_tld4_unified_a_2d_v4f32_f32:
+    return NVPTXISD::Tld4UnifiedA2DFloatFloat;
+  case Intrinsic::nvvm_tld4_unified_r_2d_v4s32_f32:
+    return NVPTXISD::Tld4UnifiedR2DS64Float;
+  case Intrinsic::nvvm_tld4_unified_g_2d_v4s32_f32:
+    return NVPTXISD::Tld4UnifiedG2DS64Float;
+  case Intrinsic::nvvm_tld4_unified_b_2d_v4s32_f32:
+    return NVPTXISD::Tld4UnifiedB2DS64Float;
+  case Intrinsic::nvvm_tld4_unified_a_2d_v4s32_f32:
+    return NVPTXISD::Tld4UnifiedA2DS64Float;
+  case Intrinsic::nvvm_tld4_unified_r_2d_v4u32_f32:
+    return NVPTXISD::Tld4UnifiedR2DU64Float;
+  case Intrinsic::nvvm_tld4_unified_g_2d_v4u32_f32:
+    return NVPTXISD::Tld4UnifiedG2DU64Float;
+  case Intrinsic::nvvm_tld4_unified_b_2d_v4u32_f32:
+    return NVPTXISD::Tld4UnifiedB2DU64Float;
+  case Intrinsic::nvvm_tld4_unified_a_2d_v4u32_f32:
+    return NVPTXISD::Tld4UnifiedA2DU64Float;
+  }
+}
+
+// Map an NVVM surface-load (suld) intrinsic ID to the matching NVPTXISD
+// opcode. The table is organized by the out-of-bounds handling mode
+// encoded in the intrinsic name suffix: clamp, trap, and zero. Within each
+// mode the cases cover every geometry (1D/2D/3D and array forms) and
+// element layout (scalar, v2, v4 of i8/i16/i32/i64; no v4i64 variants
+// exist). Returns 0 when the intrinsic is not a surface load.
+static unsigned getOpcForSurfaceInstr(unsigned Intrinsic) {
+  switch (Intrinsic) {
+  default:
+    return 0;
+  // Clamp mode: out-of-range accesses are clamped.
+  case Intrinsic::nvvm_suld_1d_i8_clamp:
+    return NVPTXISD::Suld1DI8Clamp;
+  case Intrinsic::nvvm_suld_1d_i16_clamp:
+    return NVPTXISD::Suld1DI16Clamp;
+  case Intrinsic::nvvm_suld_1d_i32_clamp:
+    return NVPTXISD::Suld1DI32Clamp;
+  case Intrinsic::nvvm_suld_1d_i64_clamp:
+    return NVPTXISD::Suld1DI64Clamp;
+  case Intrinsic::nvvm_suld_1d_v2i8_clamp:
+    return NVPTXISD::Suld1DV2I8Clamp;
+  case Intrinsic::nvvm_suld_1d_v2i16_clamp:
+    return NVPTXISD::Suld1DV2I16Clamp;
+  case Intrinsic::nvvm_suld_1d_v2i32_clamp:
+    return NVPTXISD::Suld1DV2I32Clamp;
+  case Intrinsic::nvvm_suld_1d_v2i64_clamp:
+    return NVPTXISD::Suld1DV2I64Clamp;
+  case Intrinsic::nvvm_suld_1d_v4i8_clamp:
+    return NVPTXISD::Suld1DV4I8Clamp;
+  case Intrinsic::nvvm_suld_1d_v4i16_clamp:
+    return NVPTXISD::Suld1DV4I16Clamp;
+  case Intrinsic::nvvm_suld_1d_v4i32_clamp:
+    return NVPTXISD::Suld1DV4I32Clamp;
+  case Intrinsic::nvvm_suld_1d_array_i8_clamp:
+    return NVPTXISD::Suld1DArrayI8Clamp;
+  case Intrinsic::nvvm_suld_1d_array_i16_clamp:
+    return NVPTXISD::Suld1DArrayI16Clamp;
+  case Intrinsic::nvvm_suld_1d_array_i32_clamp:
+    return NVPTXISD::Suld1DArrayI32Clamp;
+  case Intrinsic::nvvm_suld_1d_array_i64_clamp:
+    return NVPTXISD::Suld1DArrayI64Clamp;
+  case Intrinsic::nvvm_suld_1d_array_v2i8_clamp:
+    return NVPTXISD::Suld1DArrayV2I8Clamp;
+  case Intrinsic::nvvm_suld_1d_array_v2i16_clamp:
+    return NVPTXISD::Suld1DArrayV2I16Clamp;
+  case Intrinsic::nvvm_suld_1d_array_v2i32_clamp:
+    return NVPTXISD::Suld1DArrayV2I32Clamp;
+  case Intrinsic::nvvm_suld_1d_array_v2i64_clamp:
+    return NVPTXISD::Suld1DArrayV2I64Clamp;
+  case Intrinsic::nvvm_suld_1d_array_v4i8_clamp:
+    return NVPTXISD::Suld1DArrayV4I8Clamp;
+  case Intrinsic::nvvm_suld_1d_array_v4i16_clamp:
+    return NVPTXISD::Suld1DArrayV4I16Clamp;
+  case Intrinsic::nvvm_suld_1d_array_v4i32_clamp:
+    return NVPTXISD::Suld1DArrayV4I32Clamp;
+  case Intrinsic::nvvm_suld_2d_i8_clamp:
+    return NVPTXISD::Suld2DI8Clamp;
+  case Intrinsic::nvvm_suld_2d_i16_clamp:
+    return NVPTXISD::Suld2DI16Clamp;
+  case Intrinsic::nvvm_suld_2d_i32_clamp:
+    return NVPTXISD::Suld2DI32Clamp;
+  case Intrinsic::nvvm_suld_2d_i64_clamp:
+    return NVPTXISD::Suld2DI64Clamp;
+  case Intrinsic::nvvm_suld_2d_v2i8_clamp:
+    return NVPTXISD::Suld2DV2I8Clamp;
+  case Intrinsic::nvvm_suld_2d_v2i16_clamp:
+    return NVPTXISD::Suld2DV2I16Clamp;
+  case Intrinsic::nvvm_suld_2d_v2i32_clamp:
+    return NVPTXISD::Suld2DV2I32Clamp;
+  case Intrinsic::nvvm_suld_2d_v2i64_clamp:
+    return NVPTXISD::Suld2DV2I64Clamp;
+  case Intrinsic::nvvm_suld_2d_v4i8_clamp:
+    return NVPTXISD::Suld2DV4I8Clamp;
+  case Intrinsic::nvvm_suld_2d_v4i16_clamp:
+    return NVPTXISD::Suld2DV4I16Clamp;
+  case Intrinsic::nvvm_suld_2d_v4i32_clamp:
+    return NVPTXISD::Suld2DV4I32Clamp;
+  case Intrinsic::nvvm_suld_2d_array_i8_clamp:
+    return NVPTXISD::Suld2DArrayI8Clamp;
+  case Intrinsic::nvvm_suld_2d_array_i16_clamp:
+    return NVPTXISD::Suld2DArrayI16Clamp;
+  case Intrinsic::nvvm_suld_2d_array_i32_clamp:
+    return NVPTXISD::Suld2DArrayI32Clamp;
+  case Intrinsic::nvvm_suld_2d_array_i64_clamp:
+    return NVPTXISD::Suld2DArrayI64Clamp;
+  case Intrinsic::nvvm_suld_2d_array_v2i8_clamp:
+    return NVPTXISD::Suld2DArrayV2I8Clamp;
+  case Intrinsic::nvvm_suld_2d_array_v2i16_clamp:
+    return NVPTXISD::Suld2DArrayV2I16Clamp;
+  case Intrinsic::nvvm_suld_2d_array_v2i32_clamp:
+    return NVPTXISD::Suld2DArrayV2I32Clamp;
+  case Intrinsic::nvvm_suld_2d_array_v2i64_clamp:
+    return NVPTXISD::Suld2DArrayV2I64Clamp;
+  case Intrinsic::nvvm_suld_2d_array_v4i8_clamp:
+    return NVPTXISD::Suld2DArrayV4I8Clamp;
+  case Intrinsic::nvvm_suld_2d_array_v4i16_clamp:
+    return NVPTXISD::Suld2DArrayV4I16Clamp;
+  case Intrinsic::nvvm_suld_2d_array_v4i32_clamp:
+    return NVPTXISD::Suld2DArrayV4I32Clamp;
+  case Intrinsic::nvvm_suld_3d_i8_clamp:
+    return NVPTXISD::Suld3DI8Clamp;
+  case Intrinsic::nvvm_suld_3d_i16_clamp:
+    return NVPTXISD::Suld3DI16Clamp;
+  case Intrinsic::nvvm_suld_3d_i32_clamp:
+    return NVPTXISD::Suld3DI32Clamp;
+  case Intrinsic::nvvm_suld_3d_i64_clamp:
+    return NVPTXISD::Suld3DI64Clamp;
+  case Intrinsic::nvvm_suld_3d_v2i8_clamp:
+    return NVPTXISD::Suld3DV2I8Clamp;
+  case Intrinsic::nvvm_suld_3d_v2i16_clamp:
+    return NVPTXISD::Suld3DV2I16Clamp;
+  case Intrinsic::nvvm_suld_3d_v2i32_clamp:
+    return NVPTXISD::Suld3DV2I32Clamp;
+  case Intrinsic::nvvm_suld_3d_v2i64_clamp:
+    return NVPTXISD::Suld3DV2I64Clamp;
+  case Intrinsic::nvvm_suld_3d_v4i8_clamp:
+    return NVPTXISD::Suld3DV4I8Clamp;
+  case Intrinsic::nvvm_suld_3d_v4i16_clamp:
+    return NVPTXISD::Suld3DV4I16Clamp;
+  case Intrinsic::nvvm_suld_3d_v4i32_clamp:
+    return NVPTXISD::Suld3DV4I32Clamp;
+  // Trap mode: out-of-range accesses trap.
+  case Intrinsic::nvvm_suld_1d_i8_trap:
+    return NVPTXISD::Suld1DI8Trap;
+  case Intrinsic::nvvm_suld_1d_i16_trap:
+    return NVPTXISD::Suld1DI16Trap;
+  case Intrinsic::nvvm_suld_1d_i32_trap:
+    return NVPTXISD::Suld1DI32Trap;
+  case Intrinsic::nvvm_suld_1d_i64_trap:
+    return NVPTXISD::Suld1DI64Trap;
+  case Intrinsic::nvvm_suld_1d_v2i8_trap:
+    return NVPTXISD::Suld1DV2I8Trap;
+  case Intrinsic::nvvm_suld_1d_v2i16_trap:
+    return NVPTXISD::Suld1DV2I16Trap;
+  case Intrinsic::nvvm_suld_1d_v2i32_trap:
+    return NVPTXISD::Suld1DV2I32Trap;
+  case Intrinsic::nvvm_suld_1d_v2i64_trap:
+    return NVPTXISD::Suld1DV2I64Trap;
+  case Intrinsic::nvvm_suld_1d_v4i8_trap:
+    return NVPTXISD::Suld1DV4I8Trap;
+  case Intrinsic::nvvm_suld_1d_v4i16_trap:
+    return NVPTXISD::Suld1DV4I16Trap;
+  case Intrinsic::nvvm_suld_1d_v4i32_trap:
+    return NVPTXISD::Suld1DV4I32Trap;
+  case Intrinsic::nvvm_suld_1d_array_i8_trap:
+    return NVPTXISD::Suld1DArrayI8Trap;
+  case Intrinsic::nvvm_suld_1d_array_i16_trap:
+    return NVPTXISD::Suld1DArrayI16Trap;
+  case Intrinsic::nvvm_suld_1d_array_i32_trap:
+    return NVPTXISD::Suld1DArrayI32Trap;
+  case Intrinsic::nvvm_suld_1d_array_i64_trap:
+    return NVPTXISD::Suld1DArrayI64Trap;
+  case Intrinsic::nvvm_suld_1d_array_v2i8_trap:
+    return NVPTXISD::Suld1DArrayV2I8Trap;
+  case Intrinsic::nvvm_suld_1d_array_v2i16_trap:
+    return NVPTXISD::Suld1DArrayV2I16Trap;
+  case Intrinsic::nvvm_suld_1d_array_v2i32_trap:
+    return NVPTXISD::Suld1DArrayV2I32Trap;
+  case Intrinsic::nvvm_suld_1d_array_v2i64_trap:
+    return NVPTXISD::Suld1DArrayV2I64Trap;
+  case Intrinsic::nvvm_suld_1d_array_v4i8_trap:
+    return NVPTXISD::Suld1DArrayV4I8Trap;
+  case Intrinsic::nvvm_suld_1d_array_v4i16_trap:
+    return NVPTXISD::Suld1DArrayV4I16Trap;
+  case Intrinsic::nvvm_suld_1d_array_v4i32_trap:
+    return NVPTXISD::Suld1DArrayV4I32Trap;
+  case Intrinsic::nvvm_suld_2d_i8_trap:
+    return NVPTXISD::Suld2DI8Trap;
+  case Intrinsic::nvvm_suld_2d_i16_trap:
+    return NVPTXISD::Suld2DI16Trap;
+  case Intrinsic::nvvm_suld_2d_i32_trap:
+    return NVPTXISD::Suld2DI32Trap;
+  case Intrinsic::nvvm_suld_2d_i64_trap:
+    return NVPTXISD::Suld2DI64Trap;
+  case Intrinsic::nvvm_suld_2d_v2i8_trap:
+    return NVPTXISD::Suld2DV2I8Trap;
+  case Intrinsic::nvvm_suld_2d_v2i16_trap:
+    return NVPTXISD::Suld2DV2I16Trap;
+  case Intrinsic::nvvm_suld_2d_v2i32_trap:
+    return NVPTXISD::Suld2DV2I32Trap;
+  case Intrinsic::nvvm_suld_2d_v2i64_trap:
+    return NVPTXISD::Suld2DV2I64Trap;
+  case Intrinsic::nvvm_suld_2d_v4i8_trap:
+    return NVPTXISD::Suld2DV4I8Trap;
+  case Intrinsic::nvvm_suld_2d_v4i16_trap:
+    return NVPTXISD::Suld2DV4I16Trap;
+  case Intrinsic::nvvm_suld_2d_v4i32_trap:
+    return NVPTXISD::Suld2DV4I32Trap;
+  case Intrinsic::nvvm_suld_2d_array_i8_trap:
+    return NVPTXISD::Suld2DArrayI8Trap;
+  case Intrinsic::nvvm_suld_2d_array_i16_trap:
+    return NVPTXISD::Suld2DArrayI16Trap;
+  case Intrinsic::nvvm_suld_2d_array_i32_trap:
+    return NVPTXISD::Suld2DArrayI32Trap;
+  case Intrinsic::nvvm_suld_2d_array_i64_trap:
+    return NVPTXISD::Suld2DArrayI64Trap;
+  case Intrinsic::nvvm_suld_2d_array_v2i8_trap:
+    return NVPTXISD::Suld2DArrayV2I8Trap;
+  case Intrinsic::nvvm_suld_2d_array_v2i16_trap:
+    return NVPTXISD::Suld2DArrayV2I16Trap;
+  case Intrinsic::nvvm_suld_2d_array_v2i32_trap:
+    return NVPTXISD::Suld2DArrayV2I32Trap;
+  case Intrinsic::nvvm_suld_2d_array_v2i64_trap:
+    return NVPTXISD::Suld2DArrayV2I64Trap;
+  case Intrinsic::nvvm_suld_2d_array_v4i8_trap:
+    return NVPTXISD::Suld2DArrayV4I8Trap;
+  case Intrinsic::nvvm_suld_2d_array_v4i16_trap:
+    return NVPTXISD::Suld2DArrayV4I16Trap;
+  case Intrinsic::nvvm_suld_2d_array_v4i32_trap:
+    return NVPTXISD::Suld2DArrayV4I32Trap;
+  case Intrinsic::nvvm_suld_3d_i8_trap:
+    return NVPTXISD::Suld3DI8Trap;
+  case Intrinsic::nvvm_suld_3d_i16_trap:
+    return NVPTXISD::Suld3DI16Trap;
+  case Intrinsic::nvvm_suld_3d_i32_trap:
+    return NVPTXISD::Suld3DI32Trap;
+  case Intrinsic::nvvm_suld_3d_i64_trap:
+    return NVPTXISD::Suld3DI64Trap;
+  case Intrinsic::nvvm_suld_3d_v2i8_trap:
+    return NVPTXISD::Suld3DV2I8Trap;
+  case Intrinsic::nvvm_suld_3d_v2i16_trap:
+    return NVPTXISD::Suld3DV2I16Trap;
+  case Intrinsic::nvvm_suld_3d_v2i32_trap:
+    return NVPTXISD::Suld3DV2I32Trap;
+  case Intrinsic::nvvm_suld_3d_v2i64_trap:
+    return NVPTXISD::Suld3DV2I64Trap;
+  case Intrinsic::nvvm_suld_3d_v4i8_trap:
+    return NVPTXISD::Suld3DV4I8Trap;
+  case Intrinsic::nvvm_suld_3d_v4i16_trap:
+    return NVPTXISD::Suld3DV4I16Trap;
+  case Intrinsic::nvvm_suld_3d_v4i32_trap:
+    return NVPTXISD::Suld3DV4I32Trap;
+  // Zero mode: out-of-range accesses read zero.
+  case Intrinsic::nvvm_suld_1d_i8_zero:
+    return NVPTXISD::Suld1DI8Zero;
+  case Intrinsic::nvvm_suld_1d_i16_zero:
+    return NVPTXISD::Suld1DI16Zero;
+  case Intrinsic::nvvm_suld_1d_i32_zero:
+    return NVPTXISD::Suld1DI32Zero;
+  case Intrinsic::nvvm_suld_1d_i64_zero:
+    return NVPTXISD::Suld1DI64Zero;
+  case Intrinsic::nvvm_suld_1d_v2i8_zero:
+    return NVPTXISD::Suld1DV2I8Zero;
+  case Intrinsic::nvvm_suld_1d_v2i16_zero:
+    return NVPTXISD::Suld1DV2I16Zero;
+  case Intrinsic::nvvm_suld_1d_v2i32_zero:
+    return NVPTXISD::Suld1DV2I32Zero;
+  case Intrinsic::nvvm_suld_1d_v2i64_zero:
+    return NVPTXISD::Suld1DV2I64Zero;
+  case Intrinsic::nvvm_suld_1d_v4i8_zero:
+    return NVPTXISD::Suld1DV4I8Zero;
+  case Intrinsic::nvvm_suld_1d_v4i16_zero:
+    return NVPTXISD::Suld1DV4I16Zero;
+  case Intrinsic::nvvm_suld_1d_v4i32_zero:
+    return NVPTXISD::Suld1DV4I32Zero;
+  case Intrinsic::nvvm_suld_1d_array_i8_zero:
+    return NVPTXISD::Suld1DArrayI8Zero;
+  case Intrinsic::nvvm_suld_1d_array_i16_zero:
+    return NVPTXISD::Suld1DArrayI16Zero;
+  case Intrinsic::nvvm_suld_1d_array_i32_zero:
+    return NVPTXISD::Suld1DArrayI32Zero;
+  case Intrinsic::nvvm_suld_1d_array_i64_zero:
+    return NVPTXISD::Suld1DArrayI64Zero;
+  case Intrinsic::nvvm_suld_1d_array_v2i8_zero:
+    return NVPTXISD::Suld1DArrayV2I8Zero;
+  case Intrinsic::nvvm_suld_1d_array_v2i16_zero:
+    return NVPTXISD::Suld1DArrayV2I16Zero;
+  case Intrinsic::nvvm_suld_1d_array_v2i32_zero:
+    return NVPTXISD::Suld1DArrayV2I32Zero;
+  case Intrinsic::nvvm_suld_1d_array_v2i64_zero:
+    return NVPTXISD::Suld1DArrayV2I64Zero;
+  case Intrinsic::nvvm_suld_1d_array_v4i8_zero:
+    return NVPTXISD::Suld1DArrayV4I8Zero;
+  case Intrinsic::nvvm_suld_1d_array_v4i16_zero:
+    return NVPTXISD::Suld1DArrayV4I16Zero;
+  case Intrinsic::nvvm_suld_1d_array_v4i32_zero:
+    return NVPTXISD::Suld1DArrayV4I32Zero;
+  case Intrinsic::nvvm_suld_2d_i8_zero:
+    return NVPTXISD::Suld2DI8Zero;
+  case Intrinsic::nvvm_suld_2d_i16_zero:
+    return NVPTXISD::Suld2DI16Zero;
+  case Intrinsic::nvvm_suld_2d_i32_zero:
+    return NVPTXISD::Suld2DI32Zero;
+  case Intrinsic::nvvm_suld_2d_i64_zero:
+    return NVPTXISD::Suld2DI64Zero;
+  case Intrinsic::nvvm_suld_2d_v2i8_zero:
+    return NVPTXISD::Suld2DV2I8Zero;
+  case Intrinsic::nvvm_suld_2d_v2i16_zero:
+    return NVPTXISD::Suld2DV2I16Zero;
+  case Intrinsic::nvvm_suld_2d_v2i32_zero:
+    return NVPTXISD::Suld2DV2I32Zero;
+  case Intrinsic::nvvm_suld_2d_v2i64_zero:
+    return NVPTXISD::Suld2DV2I64Zero;
+  case Intrinsic::nvvm_suld_2d_v4i8_zero:
+    return NVPTXISD::Suld2DV4I8Zero;
+  case Intrinsic::nvvm_suld_2d_v4i16_zero:
+    return NVPTXISD::Suld2DV4I16Zero;
+  case Intrinsic::nvvm_suld_2d_v4i32_zero:
+    return NVPTXISD::Suld2DV4I32Zero;
+  case Intrinsic::nvvm_suld_2d_array_i8_zero:
+    return NVPTXISD::Suld2DArrayI8Zero;
+  case Intrinsic::nvvm_suld_2d_array_i16_zero:
+    return NVPTXISD::Suld2DArrayI16Zero;
+  case Intrinsic::nvvm_suld_2d_array_i32_zero:
+    return NVPTXISD::Suld2DArrayI32Zero;
+  case Intrinsic::nvvm_suld_2d_array_i64_zero:
+    return NVPTXISD::Suld2DArrayI64Zero;
+  case Intrinsic::nvvm_suld_2d_array_v2i8_zero:
+    return NVPTXISD::Suld2DArrayV2I8Zero;
+  case Intrinsic::nvvm_suld_2d_array_v2i16_zero:
+    return NVPTXISD::Suld2DArrayV2I16Zero;
+  case Intrinsic::nvvm_suld_2d_array_v2i32_zero:
+    return NVPTXISD::Suld2DArrayV2I32Zero;
+  case Intrinsic::nvvm_suld_2d_array_v2i64_zero:
+    return NVPTXISD::Suld2DArrayV2I64Zero;
+  case Intrinsic::nvvm_suld_2d_array_v4i8_zero:
+    return NVPTXISD::Suld2DArrayV4I8Zero;
+  case Intrinsic::nvvm_suld_2d_array_v4i16_zero:
+    return NVPTXISD::Suld2DArrayV4I16Zero;
+  case Intrinsic::nvvm_suld_2d_array_v4i32_zero:
+    return NVPTXISD::Suld2DArrayV4I32Zero;
+  case Intrinsic::nvvm_suld_3d_i8_zero:
+    return NVPTXISD::Suld3DI8Zero;
+  case Intrinsic::nvvm_suld_3d_i16_zero:
+    return NVPTXISD::Suld3DI16Zero;
+  case Intrinsic::nvvm_suld_3d_i32_zero:
+    return NVPTXISD::Suld3DI32Zero;
+  case Intrinsic::nvvm_suld_3d_i64_zero:
+    return NVPTXISD::Suld3DI64Zero;
+  case Intrinsic::nvvm_suld_3d_v2i8_zero:
+    return NVPTXISD::Suld3DV2I8Zero;
+  case Intrinsic::nvvm_suld_3d_v2i16_zero:
+    return NVPTXISD::Suld3DV2I16Zero;
+  case Intrinsic::nvvm_suld_3d_v2i32_zero:
+    return NVPTXISD::Suld3DV2I32Zero;
+  case Intrinsic::nvvm_suld_3d_v2i64_zero:
+    return NVPTXISD::Suld3DV2I64Zero;
+  case Intrinsic::nvvm_suld_3d_v4i8_zero:
+    return NVPTXISD::Suld3DV4I8Zero;
+  case Intrinsic::nvvm_suld_3d_v4i16_zero:
+    return NVPTXISD::Suld3DV4I16Zero;
+  case Intrinsic::nvvm_suld_3d_v4i32_zero:
+    return NVPTXISD::Suld3DV4I32Zero;
+  }
+}
+
+// llvm.ptx.memcpy.const and llvm.ptx.memmove.const need to be modeled as
+// TgtMemIntrinsic
+// because we need the information that is only available in the "Value" type
+// of destination
+// pointer. In particular, the address space information.
+bool NVPTXTargetLowering::getTgtMemIntrinsic(
+ IntrinsicInfo &Info, const CallInst &I, unsigned Intrinsic) const {
+ switch (Intrinsic) {
+ default:
+ return false;
+
+ case Intrinsic::nvvm_atomic_load_add_f32:
+ case Intrinsic::nvvm_atomic_load_inc_32:
+ case Intrinsic::nvvm_atomic_load_dec_32:
+
+ case Intrinsic::nvvm_atomic_add_gen_f_cta:
+ case Intrinsic::nvvm_atomic_add_gen_f_sys:
+ case Intrinsic::nvvm_atomic_add_gen_i_cta:
+ case Intrinsic::nvvm_atomic_add_gen_i_sys:
+ case Intrinsic::nvvm_atomic_and_gen_i_cta:
+ case Intrinsic::nvvm_atomic_and_gen_i_sys:
+ case Intrinsic::nvvm_atomic_cas_gen_i_cta:
+ case Intrinsic::nvvm_atomic_cas_gen_i_sys:
+ case Intrinsic::nvvm_atomic_dec_gen_i_cta:
+ case Intrinsic::nvvm_atomic_dec_gen_i_sys:
+ case Intrinsic::nvvm_atomic_inc_gen_i_cta:
+ case Intrinsic::nvvm_atomic_inc_gen_i_sys:
+ case Intrinsic::nvvm_atomic_max_gen_i_cta:
+ case Intrinsic::nvvm_atomic_max_gen_i_sys:
+ case Intrinsic::nvvm_atomic_min_gen_i_cta:
+ case Intrinsic::nvvm_atomic_min_gen_i_sys:
+ case Intrinsic::nvvm_atomic_or_gen_i_cta:
+ case Intrinsic::nvvm_atomic_or_gen_i_sys:
+ case Intrinsic::nvvm_atomic_exch_gen_i_cta:
+ case Intrinsic::nvvm_atomic_exch_gen_i_sys:
+ case Intrinsic::nvvm_atomic_xor_gen_i_cta:
+ case Intrinsic::nvvm_atomic_xor_gen_i_sys: {
+ auto &DL = I.getModule()->getDataLayout();
+ Info.opc = ISD::INTRINSIC_W_CHAIN;
+ Info.memVT = getValueType(DL, I.getType());
+ Info.ptrVal = I.getArgOperand(0);
+ Info.offset = 0;
+ Info.vol = false;
+ Info.readMem = true;
+ Info.writeMem = true;
+ Info.align = 0;
+ return true;
+ }
+
+ case Intrinsic::nvvm_ldu_global_i:
+ case Intrinsic::nvvm_ldu_global_f:
+ case Intrinsic::nvvm_ldu_global_p: {
+ auto &DL = I.getModule()->getDataLayout();
+ Info.opc = ISD::INTRINSIC_W_CHAIN;
+ if (Intrinsic == Intrinsic::nvvm_ldu_global_i)
+ Info.memVT = getValueType(DL, I.getType());
+ else if(Intrinsic == Intrinsic::nvvm_ldu_global_p)
+ Info.memVT = getPointerTy(DL);
+ else
+ Info.memVT = getValueType(DL, I.getType());
+ Info.ptrVal = I.getArgOperand(0);
+ Info.offset = 0;
+ Info.vol = false;
+ Info.readMem = true;
+ Info.writeMem = false;
+ Info.align = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();
+
+ return true;
+ }
+ case Intrinsic::nvvm_ldg_global_i:
+ case Intrinsic::nvvm_ldg_global_f:
+ case Intrinsic::nvvm_ldg_global_p: {
+ auto &DL = I.getModule()->getDataLayout();
+
+ Info.opc = ISD::INTRINSIC_W_CHAIN;
+ if (Intrinsic == Intrinsic::nvvm_ldg_global_i)
+ Info.memVT = getValueType(DL, I.getType());
+ else if(Intrinsic == Intrinsic::nvvm_ldg_global_p)
+ Info.memVT = getPointerTy(DL);
+ else
+ Info.memVT = getValueType(DL, I.getType());
+ Info.ptrVal = I.getArgOperand(0);
+ Info.offset = 0;
+ Info.vol = false;
+ Info.readMem = true;
+ Info.writeMem = false;
+ Info.align = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();
+
+ return true;
+ }
+
+ case Intrinsic::nvvm_tex_1d_v4f32_s32:
+ case Intrinsic::nvvm_tex_1d_v4f32_f32:
+ case Intrinsic::nvvm_tex_1d_level_v4f32_f32:
+ case Intrinsic::nvvm_tex_1d_grad_v4f32_f32:
+ case Intrinsic::nvvm_tex_1d_array_v4f32_s32:
+ case Intrinsic::nvvm_tex_1d_array_v4f32_f32:
+ case Intrinsic::nvvm_tex_1d_array_level_v4f32_f32:
+ case Intrinsic::nvvm_tex_1d_array_grad_v4f32_f32:
+ case Intrinsic::nvvm_tex_2d_v4f32_s32:
+ case Intrinsic::nvvm_tex_2d_v4f32_f32:
+ case Intrinsic::nvvm_tex_2d_level_v4f32_f32:
+ case Intrinsic::nvvm_tex_2d_grad_v4f32_f32:
+ case Intrinsic::nvvm_tex_2d_array_v4f32_s32:
+ case Intrinsic::nvvm_tex_2d_array_v4f32_f32:
+ case Intrinsic::nvvm_tex_2d_array_level_v4f32_f32:
+ case Intrinsic::nvvm_tex_2d_array_grad_v4f32_f32:
+ case Intrinsic::nvvm_tex_3d_v4f32_s32:
+ case Intrinsic::nvvm_tex_3d_v4f32_f32:
+ case Intrinsic::nvvm_tex_3d_level_v4f32_f32:
+ case Intrinsic::nvvm_tex_3d_grad_v4f32_f32:
+ case Intrinsic::nvvm_tex_cube_v4f32_f32:
+ case Intrinsic::nvvm_tex_cube_level_v4f32_f32:
+ case Intrinsic::nvvm_tex_cube_array_v4f32_f32:
+ case Intrinsic::nvvm_tex_cube_array_level_v4f32_f32:
+ case Intrinsic::nvvm_tld4_r_2d_v4f32_f32:
+ case Intrinsic::nvvm_tld4_g_2d_v4f32_f32:
+ case Intrinsic::nvvm_tld4_b_2d_v4f32_f32:
+ case Intrinsic::nvvm_tld4_a_2d_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_v4f32_s32:
+ case Intrinsic::nvvm_tex_unified_1d_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_level_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_grad_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_array_v4f32_s32:
+ case Intrinsic::nvvm_tex_unified_1d_array_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_array_level_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_array_grad_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_v4f32_s32:
+ case Intrinsic::nvvm_tex_unified_2d_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_level_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_grad_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_array_v4f32_s32:
+ case Intrinsic::nvvm_tex_unified_2d_array_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_array_level_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_array_grad_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_3d_v4f32_s32:
+ case Intrinsic::nvvm_tex_unified_3d_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_3d_level_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_3d_grad_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_cube_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_cube_level_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_cube_array_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_cube_array_level_v4f32_f32:
+ case Intrinsic::nvvm_tld4_unified_r_2d_v4f32_f32:
+ case Intrinsic::nvvm_tld4_unified_g_2d_v4f32_f32:
+ case Intrinsic::nvvm_tld4_unified_b_2d_v4f32_f32:
+ case Intrinsic::nvvm_tld4_unified_a_2d_v4f32_f32:
+ Info.opc = getOpcForTextureInstr(Intrinsic);
+ Info.memVT = MVT::v4f32;
+ Info.ptrVal = nullptr;
+ Info.offset = 0;
+ Info.vol = false;
+ Info.readMem = true;
+ Info.writeMem = false;
+ Info.align = 16;
+ return true;
+
+ case Intrinsic::nvvm_tex_1d_v4s32_s32:
+ case Intrinsic::nvvm_tex_1d_v4s32_f32:
+ case Intrinsic::nvvm_tex_1d_level_v4s32_f32:
+ case Intrinsic::nvvm_tex_1d_grad_v4s32_f32:
+ case Intrinsic::nvvm_tex_1d_array_v4s32_s32:
+ case Intrinsic::nvvm_tex_1d_array_v4s32_f32:
+ case Intrinsic::nvvm_tex_1d_array_level_v4s32_f32:
+ case Intrinsic::nvvm_tex_1d_array_grad_v4s32_f32:
+ case Intrinsic::nvvm_tex_2d_v4s32_s32:
+ case Intrinsic::nvvm_tex_2d_v4s32_f32:
+ case Intrinsic::nvvm_tex_2d_level_v4s32_f32:
+ case Intrinsic::nvvm_tex_2d_grad_v4s32_f32:
+ case Intrinsic::nvvm_tex_2d_array_v4s32_s32:
+ case Intrinsic::nvvm_tex_2d_array_v4s32_f32:
+ case Intrinsic::nvvm_tex_2d_array_level_v4s32_f32:
+ case Intrinsic::nvvm_tex_2d_array_grad_v4s32_f32:
+ case Intrinsic::nvvm_tex_3d_v4s32_s32:
+ case Intrinsic::nvvm_tex_3d_v4s32_f32:
+ case Intrinsic::nvvm_tex_3d_level_v4s32_f32:
+ case Intrinsic::nvvm_tex_3d_grad_v4s32_f32:
+ case Intrinsic::nvvm_tex_cube_v4s32_f32:
+ case Intrinsic::nvvm_tex_cube_level_v4s32_f32:
+ case Intrinsic::nvvm_tex_cube_array_v4s32_f32:
+ case Intrinsic::nvvm_tex_cube_array_level_v4s32_f32:
+ case Intrinsic::nvvm_tex_cube_v4u32_f32:
+ case Intrinsic::nvvm_tex_cube_level_v4u32_f32:
+ case Intrinsic::nvvm_tex_cube_array_v4u32_f32:
+ case Intrinsic::nvvm_tex_cube_array_level_v4u32_f32:
+ case Intrinsic::nvvm_tex_1d_v4u32_s32:
+ case Intrinsic::nvvm_tex_1d_v4u32_f32:
+ case Intrinsic::nvvm_tex_1d_level_v4u32_f32:
+ case Intrinsic::nvvm_tex_1d_grad_v4u32_f32:
+ case Intrinsic::nvvm_tex_1d_array_v4u32_s32:
+ case Intrinsic::nvvm_tex_1d_array_v4u32_f32:
+ case Intrinsic::nvvm_tex_1d_array_level_v4u32_f32:
+ case Intrinsic::nvvm_tex_1d_array_grad_v4u32_f32:
+ case Intrinsic::nvvm_tex_2d_v4u32_s32:
+ case Intrinsic::nvvm_tex_2d_v4u32_f32:
+ case Intrinsic::nvvm_tex_2d_level_v4u32_f32:
+ case Intrinsic::nvvm_tex_2d_grad_v4u32_f32:
+ case Intrinsic::nvvm_tex_2d_array_v4u32_s32:
+ case Intrinsic::nvvm_tex_2d_array_v4u32_f32:
+ case Intrinsic::nvvm_tex_2d_array_level_v4u32_f32:
+ case Intrinsic::nvvm_tex_2d_array_grad_v4u32_f32:
+ case Intrinsic::nvvm_tex_3d_v4u32_s32:
+ case Intrinsic::nvvm_tex_3d_v4u32_f32:
+ case Intrinsic::nvvm_tex_3d_level_v4u32_f32:
+ case Intrinsic::nvvm_tex_3d_grad_v4u32_f32:
+ case Intrinsic::nvvm_tld4_r_2d_v4s32_f32:
+ case Intrinsic::nvvm_tld4_g_2d_v4s32_f32:
+ case Intrinsic::nvvm_tld4_b_2d_v4s32_f32:
+ case Intrinsic::nvvm_tld4_a_2d_v4s32_f32:
+ case Intrinsic::nvvm_tld4_r_2d_v4u32_f32:
+ case Intrinsic::nvvm_tld4_g_2d_v4u32_f32:
+ case Intrinsic::nvvm_tld4_b_2d_v4u32_f32:
+ case Intrinsic::nvvm_tld4_a_2d_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_v4s32_s32:
+ case Intrinsic::nvvm_tex_unified_1d_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_level_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_grad_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_array_v4s32_s32:
+ case Intrinsic::nvvm_tex_unified_1d_array_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_array_level_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_array_grad_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_v4s32_s32:
+ case Intrinsic::nvvm_tex_unified_2d_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_level_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_grad_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_array_v4s32_s32:
+ case Intrinsic::nvvm_tex_unified_2d_array_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_array_level_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_array_grad_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_3d_v4s32_s32:
+ case Intrinsic::nvvm_tex_unified_3d_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_3d_level_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_3d_grad_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_v4u32_s32:
+ case Intrinsic::nvvm_tex_unified_1d_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_level_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_grad_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_array_v4u32_s32:
+ case Intrinsic::nvvm_tex_unified_1d_array_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_array_level_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_array_grad_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_v4u32_s32:
+ case Intrinsic::nvvm_tex_unified_2d_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_level_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_grad_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_array_v4u32_s32:
+ case Intrinsic::nvvm_tex_unified_2d_array_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_array_level_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_array_grad_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_3d_v4u32_s32:
+ case Intrinsic::nvvm_tex_unified_3d_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_3d_level_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_3d_grad_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_cube_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_cube_level_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_cube_array_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_cube_array_level_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_cube_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_cube_level_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_cube_array_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_cube_array_level_v4u32_f32:
+ case Intrinsic::nvvm_tld4_unified_r_2d_v4s32_f32:
+ case Intrinsic::nvvm_tld4_unified_g_2d_v4s32_f32:
+ case Intrinsic::nvvm_tld4_unified_b_2d_v4s32_f32:
+ case Intrinsic::nvvm_tld4_unified_a_2d_v4s32_f32:
+ case Intrinsic::nvvm_tld4_unified_r_2d_v4u32_f32:
+ case Intrinsic::nvvm_tld4_unified_g_2d_v4u32_f32:
+ case Intrinsic::nvvm_tld4_unified_b_2d_v4u32_f32:
+ case Intrinsic::nvvm_tld4_unified_a_2d_v4u32_f32:
+ Info.opc = getOpcForTextureInstr(Intrinsic);
+ Info.memVT = MVT::v4i32;
+ Info.ptrVal = nullptr;
+ Info.offset = 0;
+ Info.vol = false;
+ Info.readMem = true;
+ Info.writeMem = false;
+ Info.align = 16;
+ return true;
+
+ case Intrinsic::nvvm_suld_1d_i8_clamp:
+ case Intrinsic::nvvm_suld_1d_v2i8_clamp:
+ case Intrinsic::nvvm_suld_1d_v4i8_clamp:
+ case Intrinsic::nvvm_suld_1d_array_i8_clamp:
+ case Intrinsic::nvvm_suld_1d_array_v2i8_clamp:
+ case Intrinsic::nvvm_suld_1d_array_v4i8_clamp:
+ case Intrinsic::nvvm_suld_2d_i8_clamp:
+ case Intrinsic::nvvm_suld_2d_v2i8_clamp:
+ case Intrinsic::nvvm_suld_2d_v4i8_clamp:
+ case Intrinsic::nvvm_suld_2d_array_i8_clamp:
+ case Intrinsic::nvvm_suld_2d_array_v2i8_clamp:
+ case Intrinsic::nvvm_suld_2d_array_v4i8_clamp:
+ case Intrinsic::nvvm_suld_3d_i8_clamp:
+ case Intrinsic::nvvm_suld_3d_v2i8_clamp:
+ case Intrinsic::nvvm_suld_3d_v4i8_clamp:
+ case Intrinsic::nvvm_suld_1d_i8_trap:
+ case Intrinsic::nvvm_suld_1d_v2i8_trap:
+ case Intrinsic::nvvm_suld_1d_v4i8_trap:
+ case Intrinsic::nvvm_suld_1d_array_i8_trap:
+ case Intrinsic::nvvm_suld_1d_array_v2i8_trap:
+ case Intrinsic::nvvm_suld_1d_array_v4i8_trap:
+ case Intrinsic::nvvm_suld_2d_i8_trap:
+ case Intrinsic::nvvm_suld_2d_v2i8_trap:
+ case Intrinsic::nvvm_suld_2d_v4i8_trap:
+ case Intrinsic::nvvm_suld_2d_array_i8_trap:
+ case Intrinsic::nvvm_suld_2d_array_v2i8_trap:
+ case Intrinsic::nvvm_suld_2d_array_v4i8_trap:
+ case Intrinsic::nvvm_suld_3d_i8_trap:
+ case Intrinsic::nvvm_suld_3d_v2i8_trap:
+ case Intrinsic::nvvm_suld_3d_v4i8_trap:
+ case Intrinsic::nvvm_suld_1d_i8_zero:
+ case Intrinsic::nvvm_suld_1d_v2i8_zero:
+ case Intrinsic::nvvm_suld_1d_v4i8_zero:
+ case Intrinsic::nvvm_suld_1d_array_i8_zero:
+ case Intrinsic::nvvm_suld_1d_array_v2i8_zero:
+ case Intrinsic::nvvm_suld_1d_array_v4i8_zero:
+ case Intrinsic::nvvm_suld_2d_i8_zero:
+ case Intrinsic::nvvm_suld_2d_v2i8_zero:
+ case Intrinsic::nvvm_suld_2d_v4i8_zero:
+ case Intrinsic::nvvm_suld_2d_array_i8_zero:
+ case Intrinsic::nvvm_suld_2d_array_v2i8_zero:
+ case Intrinsic::nvvm_suld_2d_array_v4i8_zero:
+ case Intrinsic::nvvm_suld_3d_i8_zero:
+ case Intrinsic::nvvm_suld_3d_v2i8_zero:
+ case Intrinsic::nvvm_suld_3d_v4i8_zero:
+ Info.opc = getOpcForSurfaceInstr(Intrinsic);
+ Info.memVT = MVT::i8;
+ Info.ptrVal = nullptr;
+ Info.offset = 0;
+ Info.vol = false;
+ Info.readMem = true;
+ Info.writeMem = false;
+ Info.align = 16;
+ return true;
+
+ case Intrinsic::nvvm_suld_1d_i16_clamp:
+ case Intrinsic::nvvm_suld_1d_v2i16_clamp:
+ case Intrinsic::nvvm_suld_1d_v4i16_clamp:
+ case Intrinsic::nvvm_suld_1d_array_i16_clamp:
+ case Intrinsic::nvvm_suld_1d_array_v2i16_clamp:
+ case Intrinsic::nvvm_suld_1d_array_v4i16_clamp:
+ case Intrinsic::nvvm_suld_2d_i16_clamp:
+ case Intrinsic::nvvm_suld_2d_v2i16_clamp:
+ case Intrinsic::nvvm_suld_2d_v4i16_clamp:
+ case Intrinsic::nvvm_suld_2d_array_i16_clamp:
+ case Intrinsic::nvvm_suld_2d_array_v2i16_clamp:
+ case Intrinsic::nvvm_suld_2d_array_v4i16_clamp:
+ case Intrinsic::nvvm_suld_3d_i16_clamp:
+ case Intrinsic::nvvm_suld_3d_v2i16_clamp:
+ case Intrinsic::nvvm_suld_3d_v4i16_clamp:
+ case Intrinsic::nvvm_suld_1d_i16_trap:
+ case Intrinsic::nvvm_suld_1d_v2i16_trap:
+ case Intrinsic::nvvm_suld_1d_v4i16_trap:
+ case Intrinsic::nvvm_suld_1d_array_i16_trap:
+ case Intrinsic::nvvm_suld_1d_array_v2i16_trap:
+ case Intrinsic::nvvm_suld_1d_array_v4i16_trap:
+ case Intrinsic::nvvm_suld_2d_i16_trap:
+ case Intrinsic::nvvm_suld_2d_v2i16_trap:
+ case Intrinsic::nvvm_suld_2d_v4i16_trap:
+ case Intrinsic::nvvm_suld_2d_array_i16_trap:
+ case Intrinsic::nvvm_suld_2d_array_v2i16_trap:
+ case Intrinsic::nvvm_suld_2d_array_v4i16_trap:
+ case Intrinsic::nvvm_suld_3d_i16_trap:
+ case Intrinsic::nvvm_suld_3d_v2i16_trap:
+ case Intrinsic::nvvm_suld_3d_v4i16_trap:
+ case Intrinsic::nvvm_suld_1d_i16_zero:
+ case Intrinsic::nvvm_suld_1d_v2i16_zero:
+ case Intrinsic::nvvm_suld_1d_v4i16_zero:
+ case Intrinsic::nvvm_suld_1d_array_i16_zero:
+ case Intrinsic::nvvm_suld_1d_array_v2i16_zero:
+ case Intrinsic::nvvm_suld_1d_array_v4i16_zero:
+ case Intrinsic::nvvm_suld_2d_i16_zero:
+ case Intrinsic::nvvm_suld_2d_v2i16_zero:
+ case Intrinsic::nvvm_suld_2d_v4i16_zero:
+ case Intrinsic::nvvm_suld_2d_array_i16_zero:
+ case Intrinsic::nvvm_suld_2d_array_v2i16_zero:
+ case Intrinsic::nvvm_suld_2d_array_v4i16_zero:
+ case Intrinsic::nvvm_suld_3d_i16_zero:
+ case Intrinsic::nvvm_suld_3d_v2i16_zero:
+ case Intrinsic::nvvm_suld_3d_v4i16_zero:
+ Info.opc = getOpcForSurfaceInstr(Intrinsic);
+ Info.memVT = MVT::i16;
+ Info.ptrVal = nullptr;
+ Info.offset = 0;
+ Info.vol = false;
+ Info.readMem = true;
+ Info.writeMem = false;
+ Info.align = 16;
+ return true;
+
+ case Intrinsic::nvvm_suld_1d_i32_clamp:
+ case Intrinsic::nvvm_suld_1d_v2i32_clamp:
+ case Intrinsic::nvvm_suld_1d_v4i32_clamp:
+ case Intrinsic::nvvm_suld_1d_array_i32_clamp:
+ case Intrinsic::nvvm_suld_1d_array_v2i32_clamp:
+ case Intrinsic::nvvm_suld_1d_array_v4i32_clamp:
+ case Intrinsic::nvvm_suld_2d_i32_clamp:
+ case Intrinsic::nvvm_suld_2d_v2i32_clamp:
+ case Intrinsic::nvvm_suld_2d_v4i32_clamp:
+ case Intrinsic::nvvm_suld_2d_array_i32_clamp:
+ case Intrinsic::nvvm_suld_2d_array_v2i32_clamp:
+ case Intrinsic::nvvm_suld_2d_array_v4i32_clamp:
+ case Intrinsic::nvvm_suld_3d_i32_clamp:
+ case Intrinsic::nvvm_suld_3d_v2i32_clamp:
+ case Intrinsic::nvvm_suld_3d_v4i32_clamp:
+ case Intrinsic::nvvm_suld_1d_i32_trap:
+ case Intrinsic::nvvm_suld_1d_v2i32_trap:
+ case Intrinsic::nvvm_suld_1d_v4i32_trap:
+ case Intrinsic::nvvm_suld_1d_array_i32_trap:
+ case Intrinsic::nvvm_suld_1d_array_v2i32_trap:
+ case Intrinsic::nvvm_suld_1d_array_v4i32_trap:
+ case Intrinsic::nvvm_suld_2d_i32_trap:
+ case Intrinsic::nvvm_suld_2d_v2i32_trap:
+ case Intrinsic::nvvm_suld_2d_v4i32_trap:
+ case Intrinsic::nvvm_suld_2d_array_i32_trap:
+ case Intrinsic::nvvm_suld_2d_array_v2i32_trap:
+ case Intrinsic::nvvm_suld_2d_array_v4i32_trap:
+ case Intrinsic::nvvm_suld_3d_i32_trap:
+ case Intrinsic::nvvm_suld_3d_v2i32_trap:
+ case Intrinsic::nvvm_suld_3d_v4i32_trap:
+ case Intrinsic::nvvm_suld_1d_i32_zero:
+ case Intrinsic::nvvm_suld_1d_v2i32_zero:
+ case Intrinsic::nvvm_suld_1d_v4i32_zero:
+ case Intrinsic::nvvm_suld_1d_array_i32_zero:
+ case Intrinsic::nvvm_suld_1d_array_v2i32_zero:
+ case Intrinsic::nvvm_suld_1d_array_v4i32_zero:
+ case Intrinsic::nvvm_suld_2d_i32_zero:
+ case Intrinsic::nvvm_suld_2d_v2i32_zero:
+ case Intrinsic::nvvm_suld_2d_v4i32_zero:
+ case Intrinsic::nvvm_suld_2d_array_i32_zero:
+ case Intrinsic::nvvm_suld_2d_array_v2i32_zero:
+ case Intrinsic::nvvm_suld_2d_array_v4i32_zero:
+ case Intrinsic::nvvm_suld_3d_i32_zero:
+ case Intrinsic::nvvm_suld_3d_v2i32_zero:
+ case Intrinsic::nvvm_suld_3d_v4i32_zero:
+ Info.opc = getOpcForSurfaceInstr(Intrinsic);
+ Info.memVT = MVT::i32;
+ Info.ptrVal = nullptr;
+ Info.offset = 0;
+ Info.vol = false;
+ Info.readMem = true;
+ Info.writeMem = false;
+ Info.align = 16;
+ return true;
+
+ case Intrinsic::nvvm_suld_1d_i64_clamp:
+ case Intrinsic::nvvm_suld_1d_v2i64_clamp:
+ case Intrinsic::nvvm_suld_1d_array_i64_clamp:
+ case Intrinsic::nvvm_suld_1d_array_v2i64_clamp:
+ case Intrinsic::nvvm_suld_2d_i64_clamp:
+ case Intrinsic::nvvm_suld_2d_v2i64_clamp:
+ case Intrinsic::nvvm_suld_2d_array_i64_clamp:
+ case Intrinsic::nvvm_suld_2d_array_v2i64_clamp:
+ case Intrinsic::nvvm_suld_3d_i64_clamp:
+ case Intrinsic::nvvm_suld_3d_v2i64_clamp:
+ case Intrinsic::nvvm_suld_1d_i64_trap:
+ case Intrinsic::nvvm_suld_1d_v2i64_trap:
+ case Intrinsic::nvvm_suld_1d_array_i64_trap:
+ case Intrinsic::nvvm_suld_1d_array_v2i64_trap:
+ case Intrinsic::nvvm_suld_2d_i64_trap:
+ case Intrinsic::nvvm_suld_2d_v2i64_trap:
+ case Intrinsic::nvvm_suld_2d_array_i64_trap:
+ case Intrinsic::nvvm_suld_2d_array_v2i64_trap:
+ case Intrinsic::nvvm_suld_3d_i64_trap:
+ case Intrinsic::nvvm_suld_3d_v2i64_trap:
+ case Intrinsic::nvvm_suld_1d_i64_zero:
+ case Intrinsic::nvvm_suld_1d_v2i64_zero:
+ case Intrinsic::nvvm_suld_1d_array_i64_zero:
+ case Intrinsic::nvvm_suld_1d_array_v2i64_zero:
+ case Intrinsic::nvvm_suld_2d_i64_zero:
+ case Intrinsic::nvvm_suld_2d_v2i64_zero:
+ case Intrinsic::nvvm_suld_2d_array_i64_zero:
+ case Intrinsic::nvvm_suld_2d_array_v2i64_zero:
+ case Intrinsic::nvvm_suld_3d_i64_zero:
+ case Intrinsic::nvvm_suld_3d_v2i64_zero:
+ Info.opc = getOpcForSurfaceInstr(Intrinsic);
+ Info.memVT = MVT::i64;
+ Info.ptrVal = nullptr;
+ Info.offset = 0;
+ Info.vol = false;
+ Info.readMem = true;
+ Info.writeMem = false;
+ Info.align = 16;
+ return true;
+ }
+ return false;
+}
+
+/// isLegalAddressingMode - Return true if the addressing mode represented
+/// by AM is legal for this target, for a load/store of the specified type.
+/// Used to guide target specific optimizations, like loop strength reduction
+/// (LoopStrengthReduce.cpp) and memory optimization for address mode
+/// (CodeGenPrepare.cpp)
+bool NVPTXTargetLowering::isLegalAddressingMode(const DataLayout &DL,
+                                                const AddrMode &AM, Type *Ty,
+                                                unsigned AS) const {
+  // PTX only accepts these address forms:
+  //   - [avar]          (a symbol, with nothing folded into it)
+  //   - [areg]          (a plain base register)
+  //   - [areg+immoff]   (base register plus immediate offset)
+  //   - [immAddr]       (an immediate address)
+
+  // A global base may not be combined with an offset, a register or a scale.
+  if (AM.BaseGV)
+    return !AM.BaseOffs && !AM.HasBaseReg && !AM.Scale;
+
+  // A scale of 1 is just a disguised base register, which is fine unless a
+  // base register is already present -- "r+r" forms are not encodable.
+  if (AM.Scale == 1)
+    return !AM.HasBaseReg;
+
+  // Any real scaling of an index register is illegal; scale 0 leaves the
+  // legal "r", "r+i" and "i" forms.
+  return AM.Scale == 0;
+}
+
+//===----------------------------------------------------------------------===//
+// NVPTX Inline Assembly Support
+//===----------------------------------------------------------------------===//
+
+/// getConstraintType - Given a constraint letter, return the type of
+/// constraint it is for this target.
+NVPTXTargetLowering::ConstraintType
+NVPTXTargetLowering::getConstraintType(StringRef Constraint) const {
+  // Only single-letter constraints are recognized here; everything else is
+  // delegated to the generic implementation.
+  if (Constraint.size() != 1)
+    return TargetLowering::getConstraintType(Constraint);
+
+  switch (Constraint[0]) {
+  case 'b': // i1 predicate registers
+  case 'r': // i32 registers
+  case 'h': // i16 registers
+  case 'c': // i16 registers
+  case 'l': // i64 registers
+  case 'f': // f32 registers
+  case 'd': // f64 registers
+  case '0':
+  case 'N': // i64 registers
+    return C_RegisterClass;
+  default:
+    return TargetLowering::getConstraintType(Constraint);
+  }
+}
+
+std::pair<unsigned, const TargetRegisterClass *>
+NVPTXTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
+                                                  StringRef Constraint,
+                                                  MVT VT) const {
+  // Resolve the NVPTX single-letter register constraints; anything else
+  // (including multi-character constraints) goes to the generic handler.
+  if (Constraint.size() != 1)
+    return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
+
+  switch (Constraint[0]) {
+  case 'b':
+    return std::make_pair(0U, &NVPTX::Int1RegsRegClass);
+  case 'c':
+  case 'h':
+    return std::make_pair(0U, &NVPTX::Int16RegsRegClass);
+  case 'r':
+    return std::make_pair(0U, &NVPTX::Int32RegsRegClass);
+  case 'l':
+  case 'N':
+    return std::make_pair(0U, &NVPTX::Int64RegsRegClass);
+  case 'f':
+    return std::make_pair(0U, &NVPTX::Float32RegsRegClass);
+  case 'd':
+    return std::make_pair(0U, &NVPTX::Float64RegsRegClass);
+  default:
+    return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
+  }
+}
+
+//===----------------------------------------------------------------------===//
+// NVPTX DAG Combining
+//===----------------------------------------------------------------------===//
+
+bool NVPTXTargetLowering::allowFMA(MachineFunction &MF,
+                                   CodeGenOpt::Level OptLevel) const {
+  // An explicit command-line FMA-contraction setting always wins.
+  if (FMAContractLevelOpt.getNumOccurrences() > 0)
+    return FMAContractLevelOpt > 0;
+
+  // Never contract when we're not optimizing.
+  if (OptLevel == 0)
+    return false;
+
+  // Otherwise contraction is allowed when fp-op fusion is explicitly set to
+  // fast, or when unsafe FP math is in effect for this function.
+  return MF.getTarget().Options.AllowFPOpFusion == FPOpFusion::Fast ||
+         allowUnsafeFPMath(MF);
+}
+
+bool NVPTXTargetLowering::allowUnsafeFPMath(MachineFunction &MF) const {
+  // The module-wide TargetOptions flag is honored first.
+  if (MF.getTarget().Options.UnsafeFPMath)
+    return true;
+
+  // Otherwise consult the per-function "unsafe-fp-math" string attribute,
+  // which only counts when its value is exactly "true".
+  const Function *F = MF.getFunction();
+  return F->hasFnAttribute("unsafe-fp-math") &&
+         F->getFnAttribute("unsafe-fp-math").getValueAsString() == "true";
+}
+
+/// PerformADDCombineWithOperands - Try DAG combinations for an ADD with
+/// operands N0 and N1. This is a helper for PerformADDCombine that is
+/// called with the default operands, and if that fails, with commuted
+/// operands. Returns the combined node, or a null SDValue when no combine
+/// applies.
+static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1,
+                                 TargetLowering::DAGCombinerInfo &DCI,
+                                 const NVPTXSubtarget &Subtarget,
+                                 CodeGenOpt::Level OptLevel) {
+  SelectionDAG &DAG = DCI.DAG;
+  // Skip vectors; only scalar integer and floating-point adds are combined.
+  EVT VT=N0.getValueType();
+  if (VT.isVector())
+    return SDValue();
+
+  // fold (add (mul a, b), c) -> (mad a, b, c)
+  //
+  if (N0.getOpcode() == ISD::MUL) {
+    assert (VT.isInteger());
+    // For integer:
+    // Since integer multiply-add costs the same as integer multiply
+    // but is more costly than integer add, do the fusion only when
+    // the mul is only used in the add. Only i32 is combined here.
+    if (OptLevel==CodeGenOpt::None || VT != MVT::i32 ||
+        !N0.getNode()->hasOneUse())
+      return SDValue();
+
+    // Do the folding
+    return DAG.getNode(NVPTXISD::IMAD, SDLoc(N), VT,
+                       N0.getOperand(0), N0.getOperand(1), N1);
+  }
+  // fold (add (fmul a, b), c) -> (fma a, b, c) for f32/f64, guarded by
+  // register-pressure heuristics below.
+  else if (N0.getOpcode() == ISD::FMUL) {
+    if (VT == MVT::f32 || VT == MVT::f64) {
+      const auto *TLI = static_cast<const NVPTXTargetLowering *>(
+          &DAG.getTargetLoweringInfo());
+      // Contraction must be permitted for this function (flags/attributes).
+      if (!TLI->allowFMA(DAG.getMachineFunction(), OptLevel))
+        return SDValue();
+
+      // For floating point:
+      // Do the fusion only when the mul has less than 5 uses and all
+      // are add.
+      // The heuristic is that if a use is not an add, then that use
+      // cannot be fused into fma, therefore mul is still needed anyway.
+      // If there are more than 4 uses, even if they are all add, fusing
+      // them will increase register pressure.
+      //
+      int numUses = 0;
+      int nonAddCount = 0;
+      for (SDNode::use_iterator UI = N0.getNode()->use_begin(),
+                                UE = N0.getNode()->use_end();
+           UI != UE; ++UI) {
+        numUses++;
+        SDNode *User = *UI;
+        if (User->getOpcode() != ISD::FADD)
+          ++nonAddCount;
+      }
+      if (numUses >= 5)
+        return SDValue();
+      if (nonAddCount) {
+        int orderNo = N->getIROrder();
+        int orderNo2 = N0.getNode()->getIROrder();
+        // Simple register-pressure heuristic: the IR-order difference
+        // approximates the distance between the FMUL's def and this use;
+        // the longer the distance, the more likely fusing causes register
+        // pressure, so only short-distance cases are rejected here.
+        if (orderNo - orderNo2 < 500)
+          return SDValue();
+
+        // Now, check if at least one of the FMUL's operands is live beyond the node N,
+        // which guarantees that the FMA will not increase register pressure at node N.
+        bool opIsLive = false;
+        const SDNode *left = N0.getOperand(0).getNode();
+        const SDNode *right = N0.getOperand(1).getNode();
+
+        // Constant operands are treated as always live.
+        if (isa<ConstantSDNode>(left) || isa<ConstantSDNode>(right))
+          opIsLive = true;
+
+        // An operand is live beyond N if any of its users comes after N in
+        // IR order.
+        if (!opIsLive)
+          for (SDNode::use_iterator UI = left->use_begin(), UE = left->use_end(); UI != UE; ++UI) {
+            SDNode *User = *UI;
+            int orderNo3 = User->getIROrder();
+            if (orderNo3 > orderNo) {
+              opIsLive = true;
+              break;
+            }
+          }
+
+        if (!opIsLive)
+          for (SDNode::use_iterator UI = right->use_begin(), UE = right->use_end(); UI != UE; ++UI) {
+            SDNode *User = *UI;
+            int orderNo3 = User->getIROrder();
+            if (orderNo3 > orderNo) {
+              opIsLive = true;
+              break;
+            }
+          }
+
+        if (!opIsLive)
+          return SDValue();
+      }
+
+      return DAG.getNode(ISD::FMA, SDLoc(N), VT,
+                         N0.getOperand(0), N0.getOperand(1), N1);
+    }
+  }
+
+  return SDValue();
+}
+
+/// PerformADDCombine - Target-specific dag combine xforms for ISD::ADD.
+///
+static SDValue PerformADDCombine(SDNode *N,
+                                 TargetLowering::DAGCombinerInfo &DCI,
+                                 const NVPTXSubtarget &Subtarget,
+                                 CodeGenOpt::Level OptLevel) {
+  SDValue LHS = N->getOperand(0);
+  SDValue RHS = N->getOperand(1);
+
+  // Attempt the combine with the operands as written.
+  if (SDValue Folded =
+          PerformADDCombineWithOperands(N, LHS, RHS, DCI, Subtarget, OptLevel))
+    return Folded;
+
+  // ADD is commutative, so retry with the operands swapped.
+  return PerformADDCombineWithOperands(N, RHS, LHS, DCI, Subtarget, OptLevel);
+}
+
+static SDValue PerformANDCombine(SDNode *N,
+                                 TargetLowering::DAGCombinerInfo &DCI) {
+  // The type legalizer turns a vector load of i8 values into a zextload to i16
+  // registers, optionally ANY_EXTENDs it (if target type is integer),
+  // and ANDs off the high 8 bits. Since we turn this load into a
+  // target-specific DAG node, the DAG combiner fails to eliminate these AND
+  // nodes. Do that here.
+  SDValue Val = N->getOperand(0);
+  SDValue Mask = N->getOperand(1);
+
+  // Canonicalize so Val is the (possibly extended) load and Mask the
+  // constant operand.
+  if (isa<ConstantSDNode>(Val)) {
+    std::swap(Val, Mask);
+  }
+
+  SDValue AExt;
+  // Generally, we will see zextload -> IMOV16rr -> ANY_EXTEND -> and
+  if (Val.getOpcode() == ISD::ANY_EXTEND) {
+    AExt = Val;              // remember the extend so it can be re-inserted as a zext
+    Val = Val->getOperand(0);
+  }
+
+  // Peek through an already-selected 16-bit register-to-register move.
+  if (Val->isMachineOpcode() && Val->getMachineOpcode() == NVPTX::IMOV16rr) {
+    Val = Val->getOperand(0);
+  }
+
+  if (Val->getOpcode() == NVPTXISD::LoadV2 ||
+      Val->getOpcode() == NVPTXISD::LoadV4) {
+    ConstantSDNode *MaskCnst = dyn_cast<ConstantSDNode>(Mask);
+    if (!MaskCnst) {
+      // Not an AND with a constant
+      return SDValue();
+    }
+
+    uint64_t MaskVal = MaskCnst->getZExtValue();
+    if (MaskVal != 0xff) {
+      // Not an AND that chops off top 8 bits
+      return SDValue();
+    }
+
+    MemSDNode *Mem = dyn_cast<MemSDNode>(Val);
+    if (!Mem) {
+      // Not a MemSDNode?!?
+      return SDValue();
+    }
+
+    EVT MemVT = Mem->getMemoryVT();
+    if (MemVT != MVT::v2i8 && MemVT != MVT::v4i8) {
+      // We only handle the i8 case
+      return SDValue();
+    }
+
+    // The extension kind is encoded as a constant in the node's last operand.
+    unsigned ExtType =
+      cast<ConstantSDNode>(Val->getOperand(Val->getNumOperands()-1))->
+        getZExtValue();
+    if (ExtType == ISD::SEXTLOAD) {
+      // If for some reason the load is a sextload, the and is needed to zero
+      // out the high 8 bits
+      return SDValue();
+    }
+
+    bool AddTo = false;
+    if (AExt.getNode() != nullptr) {
+      // Re-insert the ext as a zext.
+      Val = DCI.DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N),
+                            AExt.getValueType(), Val);
+      AddTo = true;
+    }
+
+    // If we get here, the AND is unnecessary.  Just replace it with the load
+    DCI.CombineTo(N, Val, AddTo);
+  }
+
+  return SDValue();
+}
+
+static SDValue PerformREMCombine(SDNode *N,
+                                 TargetLowering::DAGCombinerInfo &DCI,
+                                 CodeGenOpt::Level OptLevel) {
+  assert(N->getOpcode() == ISD::SREM || N->getOpcode() == ISD::UREM);
+
+  // This rewrite only pays off when optimizing (-O2 and above).
+  if (OptLevel < CodeGenOpt::Default)
+    return SDValue();
+
+  SelectionDAG &DAG = DCI.DAG;
+  SDLoc Loc(N);
+  EVT ResVT = N->getValueType(0);
+  const bool Signed = N->getOpcode() == ISD::SREM;
+  const unsigned DivOpcode = Signed ? ISD::SDIV : ISD::UDIV;
+
+  const SDValue &Dividend = N->getOperand(0);
+  const SDValue &Divisor = N->getOperand(1);
+
+  // When the matching division of the same operands already exists in the
+  // DAG, express the remainder in terms of it:
+  //   Num % Den -> Num - (Num / Den) * Den
+  // so the (expensive) division is computed only once.
+  for (const SDNode *U : Dividend->uses()) {
+    if (U->getOpcode() != DivOpcode || U->getOperand(0) != Dividend ||
+        U->getOperand(1) != Divisor)
+      continue;
+    SDValue Quotient = DAG.getNode(DivOpcode, Loc, ResVT, Dividend, Divisor);
+    SDValue Product = DAG.getNode(ISD::MUL, Loc, ResVT, Quotient, Divisor);
+    return DAG.getNode(ISD::SUB, Loc, ResVT, Dividend, Product);
+  }
+  return SDValue();
+}
+
+// Signedness of a mul.wide candidate operand, as deduced from the
+// extension node feeding it (see IsMulWideOperandDemotable below).
+enum OperandSignedness {
+  Signed = 0,
+  Unsigned,
+  Unknown
+};
+
+/// IsMulWideOperandDemotable - Checks if the provided DAG node is an operand
+/// that can be demoted to \p OptSize bits without loss of information. The
+/// signedness of the operand, if determinable, is placed in \p S.
+static bool IsMulWideOperandDemotable(SDValue Op,
+                                      unsigned OptSize,
+                                      OperandSignedness &S) {
+  S = Unknown;
+
+  // Only explicit extensions whose source already fits in OptSize bits
+  // are demotable; the extension kind fixes the signedness.
+  switch (Op.getOpcode()) {
+  case ISD::SIGN_EXTEND:
+  case ISD::SIGN_EXTEND_INREG:
+    if (Op.getOperand(0).getValueType().getSizeInBits() <= OptSize) {
+      S = Signed;
+      return true;
+    }
+    break;
+  case ISD::ZERO_EXTEND:
+    if (Op.getOperand(0).getValueType().getSizeInBits() <= OptSize) {
+      S = Unsigned;
+      return true;
+    }
+    break;
+  }
+
+  return false;
+}
+
+/// AreMulWideOperandsDemotable - Checks if the given LHS and RHS operands can
+/// be demoted to \p OptSize bits without loss of information. If the operands
+/// contain a constant, it should appear as the RHS operand. The signedness of
+/// the operands is placed in \p IsSigned.
+static bool AreMulWideOperandsDemotable(SDValue LHS, SDValue RHS,
+                                        unsigned OptSize,
+                                        bool &IsSigned) {
+  // The LHS must be a demotable extension of known signedness -- it anchors
+  // the signedness of the whole multiply.
+  OperandSignedness LHSSign;
+  if (!IsMulWideOperandDemotable(LHS, OptSize, LHSSign) || LHSSign == Unknown)
+    return false;
+
+  IsSigned = (LHSSign == Signed);
+
+  // A constant RHS is demotable when its value fits in OptSize bits under
+  // the LHS's signedness.
+  if (ConstantSDNode *CI = dyn_cast<ConstantSDNode>(RHS)) {
+    const APInt &Val = CI->getAPIntValue();
+    return LHSSign == Unsigned ? Val.isIntN(OptSize)
+                               : Val.isSignedIntN(OptSize);
+  }
+
+  // Otherwise the RHS must itself be demotable with the same signedness.
+  OperandSignedness RHSSign;
+  return IsMulWideOperandDemotable(RHS, OptSize, RHSSign) &&
+         RHSSign == LHSSign;
+}
+
+/// TryMULWIDECombine - Attempt to replace a multiply of M bits with a multiply
+/// of M/2 bits that produces an M-bit result (i.e. mul.wide). This transform
+/// works on both multiply DAG nodes and SHL DAG nodes with a constant shift
+/// amount.
+static SDValue TryMULWIDECombine(SDNode *N,
+                                 TargetLowering::DAGCombinerInfo &DCI) {
+  // Only i32 and i64 results have a mul.wide counterpart.
+  EVT MulType = N->getValueType(0);
+  if (MulType != MVT::i32 && MulType != MVT::i64)
+    return SDValue();
+
+  SDLoc DL(N);
+  unsigned OptSize = MulType.getSizeInBits() >> 1;
+  SDValue LHS = N->getOperand(0);
+  SDValue RHS = N->getOperand(1);
+
+  // For a plain multiply, canonicalize any constant onto the RHS.
+  if (N->getOpcode() == ISD::MUL && isa<ConstantSDNode>(LHS))
+    std::swap(LHS, RHS);
+
+  // A left shift by constant k is a multiply by 2^k; rewrite the RHS so the
+  // remaining logic only has to reason about multiplies.
+  if (N->getOpcode() == ISD::SHL) {
+    ConstantSDNode *ShlRHS = dyn_cast<ConstantSDNode>(RHS);
+    if (!ShlRHS)
+      return SDValue();
+
+    APInt ShiftAmt = ShlRHS->getAPIntValue();
+    unsigned BitWidth = MulType.getSizeInBits();
+    if (!ShiftAmt.sge(0) || !ShiftAmt.slt(BitWidth))
+      return SDValue();
+    RHS = DCI.DAG.getConstant(APInt(BitWidth, 1) << ShiftAmt, DL, MulType);
+  }
+
+  // Both operands must fit in half the width (with consistent signedness).
+  bool Signed;
+  if (!AreMulWideOperandsDemotable(LHS, RHS, OptSize, Signed))
+    return SDValue();
+
+  EVT DemotedVT = (MulType == MVT::i32) ? MVT::i16 : MVT::i32;
+
+  // Truncate the operands to the correct size. Note that these are just for
+  // type consistency and will (likely) be eliminated in later phases.
+  SDValue TruncLHS = DCI.DAG.getNode(ISD::TRUNCATE, DL, DemotedVT, LHS);
+  SDValue TruncRHS = DCI.DAG.getNode(ISD::TRUNCATE, DL, DemotedVT, RHS);
+
+  unsigned Opc =
+      Signed ? NVPTXISD::MUL_WIDE_SIGNED : NVPTXISD::MUL_WIDE_UNSIGNED;
+  return DCI.DAG.getNode(Opc, DL, MulType, TruncLHS, TruncRHS);
+}
+
+/// PerformMULCombine - Runs PTX-specific DAG combine patterns on MUL nodes.
+static SDValue PerformMULCombine(SDNode *N,
+ TargetLowering::DAGCombinerInfo &DCI,
+ CodeGenOpt::Level OptLevel) {
+ if (OptLevel > 0) {
+ // Try mul.wide combining at OptLevel > 0
+ if (SDValue Ret = TryMULWIDECombine(N, DCI))
+ return Ret;
+ }
+
+ return SDValue();
+}
+
+/// PerformSHLCombine - Runs PTX-specific DAG combine patterns on SHL nodes.
+static SDValue PerformSHLCombine(SDNode *N,
+ TargetLowering::DAGCombinerInfo &DCI,
+ CodeGenOpt::Level OptLevel) {
+ if (OptLevel > 0) {
+ // Try mul.wide combining at OptLevel > 0
+ if (SDValue Ret = TryMULWIDECombine(N, DCI))
+ return Ret;
+ }
+
+ return SDValue();
+}
+
+static SDValue PerformSETCCCombine(SDNode *N,
+ TargetLowering::DAGCombinerInfo &DCI) {
+ EVT CCType = N->getValueType(0);
+ SDValue A = N->getOperand(0);
+ SDValue B = N->getOperand(1);
+
+ if (CCType != MVT::v2i1 || A.getValueType() != MVT::v2f16)
+ return SDValue();
+
+ SDLoc DL(N);
+ // setp.f16x2 returns two scalar predicates, which we need to
+ // convert back to v2i1. The returned result will be scalarized by
+ // the legalizer, but the comparison will remain a single vector
+ // instruction.
+ SDValue CCNode = DCI.DAG.getNode(NVPTXISD::SETP_F16X2, DL,
+ DCI.DAG.getVTList(MVT::i1, MVT::i1),
+ {A, B, N->getOperand(2)});
+ return DCI.DAG.getNode(ISD::BUILD_VECTOR, DL, CCType, CCNode.getValue(0),
+ CCNode.getValue(1));
+}
+
+SDValue NVPTXTargetLowering::PerformDAGCombine(SDNode *N,
+ DAGCombinerInfo &DCI) const {
+ CodeGenOpt::Level OptLevel = getTargetMachine().getOptLevel();
+ switch (N->getOpcode()) {
+ default: break;
+ case ISD::ADD:
+ case ISD::FADD:
+ return PerformADDCombine(N, DCI, STI, OptLevel);
+ case ISD::MUL:
+ return PerformMULCombine(N, DCI, OptLevel);
+ case ISD::SHL:
+ return PerformSHLCombine(N, DCI, OptLevel);
+ case ISD::AND:
+ return PerformANDCombine(N, DCI);
+ case ISD::UREM:
+ case ISD::SREM:
+ return PerformREMCombine(N, DCI, OptLevel);
+ case ISD::SETCC:
+ return PerformSETCCCombine(N, DCI);
+ }
+ return SDValue();
+}
+
+/// ReplaceVectorLoad - Convert vector loads into multi-output scalar loads.
+static void ReplaceLoadVector(SDNode *N, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &Results) {
+ EVT ResVT = N->getValueType(0);
+ SDLoc DL(N);
+
+ assert(ResVT.isVector() && "Vector load must have vector type");
+
+ // We only handle "native" vector sizes for now, e.g. <4 x double> is not
+ // legal. We can (and should) split that into 2 loads of <2 x double> here
+ // but I'm leaving that as a TODO for now.
+ assert(ResVT.isSimple() && "Can only handle simple types");
+ switch (ResVT.getSimpleVT().SimpleTy) {
+ default:
+ return;
+ case MVT::v2i8:
+ case MVT::v2i16:
+ case MVT::v2i32:
+ case MVT::v2i64:
+ case MVT::v2f16:
+ case MVT::v2f32:
+ case MVT::v2f64:
+ case MVT::v4i8:
+ case MVT::v4i16:
+ case MVT::v4i32:
+ case MVT::v4f16:
+ case MVT::v4f32:
+ case MVT::v8f16: // <4 x f16x2>
+ // This is a "native" vector type
+ break;
+ }
+
+ LoadSDNode *LD = cast<LoadSDNode>(N);
+
+ unsigned Align = LD->getAlignment();
+ auto &TD = DAG.getDataLayout();
+ unsigned PrefAlign =
+ TD.getPrefTypeAlignment(ResVT.getTypeForEVT(*DAG.getContext()));
+ if (Align < PrefAlign) {
+ // This load is not sufficiently aligned, so bail out and let this vector
+ // load be scalarized. Note that we may still be able to emit smaller
+ // vector loads. For example, if we are loading a <4 x float> with an
+ // alignment of 8, this check will fail but the legalizer will try again
+ // with 2 x <2 x float>, which will succeed with an alignment of 8.
+ return;
+ }
+
+ EVT EltVT = ResVT.getVectorElementType();
+ unsigned NumElts = ResVT.getVectorNumElements();
+
+ // Since LoadV2 is a target node, we cannot rely on DAG type legalization.
+ // Therefore, we must ensure the type is legal. For i1 and i8, we set the
+ // loaded type to i16 and propagate the "real" type as the memory type.
+ bool NeedTrunc = false;
+ if (EltVT.getSizeInBits() < 16) {
+ EltVT = MVT::i16;
+ NeedTrunc = true;
+ }
+
+ unsigned Opcode = 0;
+ SDVTList LdResVTs;
+ bool LoadF16x2 = false;
+
+ switch (NumElts) {
+ default:
+ return;
+ case 2:
+ Opcode = NVPTXISD::LoadV2;
+ LdResVTs = DAG.getVTList(EltVT, EltVT, MVT::Other);
+ break;
+ case 4: {
+ Opcode = NVPTXISD::LoadV4;
+ EVT ListVTs[] = { EltVT, EltVT, EltVT, EltVT, MVT::Other };
+ LdResVTs = DAG.getVTList(ListVTs);
+ break;
+ }
+ case 8: {
+ // v8f16 is a special case. PTX doesn't have ld.v8.f16
+ // instruction. Instead, we split the vector into v2f16 chunks and
+ // load them with ld.v4.b32.
+ assert(EltVT == MVT::f16 && "Unsupported v8 vector type.");
+ LoadF16x2 = true;
+ Opcode = NVPTXISD::LoadV4;
+ EVT ListVTs[] = {MVT::v2f16, MVT::v2f16, MVT::v2f16, MVT::v2f16,
+ MVT::Other};
+ LdResVTs = DAG.getVTList(ListVTs);
+ break;
+ }
+ }
+
+ // Copy regular operands
+ SmallVector<SDValue, 8> OtherOps(N->op_begin(), N->op_end());
+
+ // The select routine does not have access to the LoadSDNode instance, so
+ // pass along the extension information
+ OtherOps.push_back(DAG.getIntPtrConstant(LD->getExtensionType(), DL));
+
+ SDValue NewLD = DAG.getMemIntrinsicNode(Opcode, DL, LdResVTs, OtherOps,
+ LD->getMemoryVT(),
+ LD->getMemOperand());
+
+ SmallVector<SDValue, 8> ScalarRes;
+ if (LoadF16x2) {
+ // Split v2f16 subvectors back into individual elements.
+ NumElts /= 2;
+ for (unsigned i = 0; i < NumElts; ++i) {
+ SDValue SubVector = NewLD.getValue(i);
+ SDValue E0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, SubVector,
+ DAG.getIntPtrConstant(0, DL));
+ SDValue E1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, SubVector,
+ DAG.getIntPtrConstant(1, DL));
+ ScalarRes.push_back(E0);
+ ScalarRes.push_back(E1);
+ }
+ } else {
+ for (unsigned i = 0; i < NumElts; ++i) {
+ SDValue Res = NewLD.getValue(i);
+ if (NeedTrunc)
+ Res = DAG.getNode(ISD::TRUNCATE, DL, ResVT.getVectorElementType(), Res);
+ ScalarRes.push_back(Res);
+ }
+ }
+
+ SDValue LoadChain = NewLD.getValue(NumElts);
+
+ SDValue BuildVec = DAG.getBuildVector(ResVT, DL, ScalarRes);
+
+ Results.push_back(BuildVec);
+ Results.push_back(LoadChain);
+}
+
+static void ReplaceINTRINSIC_W_CHAIN(SDNode *N, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &Results) {
+ SDValue Chain = N->getOperand(0);
+ SDValue Intrin = N->getOperand(1);
+ SDLoc DL(N);
+
+ // Get the intrinsic ID
+ unsigned IntrinNo = cast<ConstantSDNode>(Intrin.getNode())->getZExtValue();
+ switch (IntrinNo) {
+ default:
+ return;
+ case Intrinsic::nvvm_ldg_global_i:
+ case Intrinsic::nvvm_ldg_global_f:
+ case Intrinsic::nvvm_ldg_global_p:
+ case Intrinsic::nvvm_ldu_global_i:
+ case Intrinsic::nvvm_ldu_global_f:
+ case Intrinsic::nvvm_ldu_global_p: {
+ EVT ResVT = N->getValueType(0);
+
+ if (ResVT.isVector()) {
+ // Vector LDG/LDU
+
+ unsigned NumElts = ResVT.getVectorNumElements();
+ EVT EltVT = ResVT.getVectorElementType();
+
+ // Since LDU/LDG are target nodes, we cannot rely on DAG type
+ // legalization.
+ // Therefore, we must ensure the type is legal. For i1 and i8, we set the
+ // loaded type to i16 and propagate the "real" type as the memory type.
+ bool NeedTrunc = false;
+ if (EltVT.getSizeInBits() < 16) {
+ EltVT = MVT::i16;
+ NeedTrunc = true;
+ }
+
+ unsigned Opcode = 0;
+ SDVTList LdResVTs;
+
+ switch (NumElts) {
+ default:
+ return;
+ case 2:
+ switch (IntrinNo) {
+ default:
+ return;
+ case Intrinsic::nvvm_ldg_global_i:
+ case Intrinsic::nvvm_ldg_global_f:
+ case Intrinsic::nvvm_ldg_global_p:
+ Opcode = NVPTXISD::LDGV2;
+ break;
+ case Intrinsic::nvvm_ldu_global_i:
+ case Intrinsic::nvvm_ldu_global_f:
+ case Intrinsic::nvvm_ldu_global_p:
+ Opcode = NVPTXISD::LDUV2;
+ break;
+ }
+ LdResVTs = DAG.getVTList(EltVT, EltVT, MVT::Other);
+ break;
+ case 4: {
+ switch (IntrinNo) {
+ default:
+ return;
+ case Intrinsic::nvvm_ldg_global_i:
+ case Intrinsic::nvvm_ldg_global_f:
+ case Intrinsic::nvvm_ldg_global_p:
+ Opcode = NVPTXISD::LDGV4;
+ break;
+ case Intrinsic::nvvm_ldu_global_i:
+ case Intrinsic::nvvm_ldu_global_f:
+ case Intrinsic::nvvm_ldu_global_p:
+ Opcode = NVPTXISD::LDUV4;
+ break;
+ }
+ EVT ListVTs[] = { EltVT, EltVT, EltVT, EltVT, MVT::Other };
+ LdResVTs = DAG.getVTList(ListVTs);
+ break;
+ }
+ }
+
+ SmallVector<SDValue, 8> OtherOps;
+
+ // Copy regular operands
+
+ OtherOps.push_back(Chain); // Chain
+ // Skip operand 1 (intrinsic ID)
+ // Others
+ OtherOps.append(N->op_begin() + 2, N->op_end());
+
+ MemIntrinsicSDNode *MemSD = cast<MemIntrinsicSDNode>(N);
+
+ SDValue NewLD = DAG.getMemIntrinsicNode(Opcode, DL, LdResVTs, OtherOps,
+ MemSD->getMemoryVT(),
+ MemSD->getMemOperand());
+
+ SmallVector<SDValue, 4> ScalarRes;
+
+ for (unsigned i = 0; i < NumElts; ++i) {
+ SDValue Res = NewLD.getValue(i);
+ if (NeedTrunc)
+ Res =
+ DAG.getNode(ISD::TRUNCATE, DL, ResVT.getVectorElementType(), Res);
+ ScalarRes.push_back(Res);
+ }
+
+ SDValue LoadChain = NewLD.getValue(NumElts);
+
+ SDValue BuildVec =
+ DAG.getBuildVector(ResVT, DL, ScalarRes);
+
+ Results.push_back(BuildVec);
+ Results.push_back(LoadChain);
+ } else {
+ // i8 LDG/LDU
+ assert(ResVT.isSimple() && ResVT.getSimpleVT().SimpleTy == MVT::i8 &&
+ "Custom handling of non-i8 ldu/ldg?");
+
+ // Just copy all operands as-is
+ SmallVector<SDValue, 4> Ops(N->op_begin(), N->op_end());
+
+ // Force output to i16
+ SDVTList LdResVTs = DAG.getVTList(MVT::i16, MVT::Other);
+
+ MemIntrinsicSDNode *MemSD = cast<MemIntrinsicSDNode>(N);
+
+ // We make sure the memory type is i8, which will be used during isel
+ // to select the proper instruction.
+ SDValue NewLD =
+ DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, LdResVTs, Ops,
+ MVT::i8, MemSD->getMemOperand());
+
+ Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i8,
+ NewLD.getValue(0)));
+ Results.push_back(NewLD.getValue(1));
+ }
+ }
+ }
+}
+
+void NVPTXTargetLowering::ReplaceNodeResults(
+ SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const {
+ switch (N->getOpcode()) {
+ default:
+ report_fatal_error("Unhandled custom legalization");
+ case ISD::LOAD:
+ ReplaceLoadVector(N, DAG, Results);
+ return;
+ case ISD::INTRINSIC_W_CHAIN:
+ ReplaceINTRINSIC_W_CHAIN(N, DAG, Results);
+ return;
+ }
+}
+
+// Pin NVPTXSection's and NVPTXTargetObjectFile's vtables to this file.
+void NVPTXSection::anchor() {}
+
+NVPTXTargetObjectFile::~NVPTXTargetObjectFile() {
+ delete static_cast<NVPTXSection *>(TextSection);
+ delete static_cast<NVPTXSection *>(DataSection);
+ delete static_cast<NVPTXSection *>(BSSSection);
+ delete static_cast<NVPTXSection *>(ReadOnlySection);
+
+ delete static_cast<NVPTXSection *>(StaticCtorSection);
+ delete static_cast<NVPTXSection *>(StaticDtorSection);
+ delete static_cast<NVPTXSection *>(LSDASection);
+ delete static_cast<NVPTXSection *>(EHFrameSection);
+ delete static_cast<NVPTXSection *>(DwarfAbbrevSection);
+ delete static_cast<NVPTXSection *>(DwarfInfoSection);
+ delete static_cast<NVPTXSection *>(DwarfLineSection);
+ delete static_cast<NVPTXSection *>(DwarfFrameSection);
+ delete static_cast<NVPTXSection *>(DwarfPubTypesSection);
+ delete static_cast<const NVPTXSection *>(DwarfDebugInlineSection);
+ delete static_cast<NVPTXSection *>(DwarfStrSection);
+ delete static_cast<NVPTXSection *>(DwarfLocSection);
+ delete static_cast<NVPTXSection *>(DwarfARangesSection);
+ delete static_cast<NVPTXSection *>(DwarfRangesSection);
+ delete static_cast<NVPTXSection *>(DwarfMacinfoSection);
+}
+
+MCSection *NVPTXTargetObjectFile::SelectSectionForGlobal(
+ const GlobalObject *GO, SectionKind Kind, const TargetMachine &TM) const {
+ return getDataSection();
+}
diff --git a/contrib/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td b/contrib/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
index 2b847414b8a8..9378b29a9d0e 100644
--- a/contrib/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
+++ b/contrib/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
@@ -1,3165 +1,3164 @@
-//===- NVPTXInstrInfo.td - NVPTX Instruction defs -------------*- tblgen-*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file describes the PTX instructions in TableGen format.
-//
-//===----------------------------------------------------------------------===//
-
-include "NVPTXInstrFormats.td"
-
-// A NOP instruction
-let hasSideEffects = 0 in {
- def NOP : NVPTXInst<(outs), (ins), "", []>;
-}
-
-let OperandType = "OPERAND_IMMEDIATE" in {
- def f16imm : Operand<f16>;
-}
-
-// List of vector specific properties
-def isVecLD : VecInstTypeEnum<1>;
-def isVecST : VecInstTypeEnum<2>;
-def isVecBuild : VecInstTypeEnum<3>;
-def isVecShuffle : VecInstTypeEnum<4>;
-def isVecExtract : VecInstTypeEnum<5>;
-def isVecInsert : VecInstTypeEnum<6>;
-def isVecDest : VecInstTypeEnum<7>;
-def isVecOther : VecInstTypeEnum<15>;
-
-//===----------------------------------------------------------------------===//
-// NVPTX Operand Definitions.
-//===----------------------------------------------------------------------===//
-
-def brtarget : Operand<OtherVT>;
-
-// CVT conversion modes
-// These must match the enum in NVPTX.h
-def CvtNONE : PatLeaf<(i32 0x0)>;
-def CvtRNI : PatLeaf<(i32 0x1)>;
-def CvtRZI : PatLeaf<(i32 0x2)>;
-def CvtRMI : PatLeaf<(i32 0x3)>;
-def CvtRPI : PatLeaf<(i32 0x4)>;
-def CvtRN : PatLeaf<(i32 0x5)>;
-def CvtRZ : PatLeaf<(i32 0x6)>;
-def CvtRM : PatLeaf<(i32 0x7)>;
-def CvtRP : PatLeaf<(i32 0x8)>;
-
-def CvtNONE_FTZ : PatLeaf<(i32 0x10)>;
-def CvtRNI_FTZ : PatLeaf<(i32 0x11)>;
-def CvtRZI_FTZ : PatLeaf<(i32 0x12)>;
-def CvtRMI_FTZ : PatLeaf<(i32 0x13)>;
-def CvtRPI_FTZ : PatLeaf<(i32 0x14)>;
-def CvtRN_FTZ : PatLeaf<(i32 0x15)>;
-def CvtRZ_FTZ : PatLeaf<(i32 0x16)>;
-def CvtRM_FTZ : PatLeaf<(i32 0x17)>;
-def CvtRP_FTZ : PatLeaf<(i32 0x18)>;
-
-def CvtSAT : PatLeaf<(i32 0x20)>;
-def CvtSAT_FTZ : PatLeaf<(i32 0x30)>;
-
-def CvtMode : Operand<i32> {
- let PrintMethod = "printCvtMode";
-}
-
-// Compare modes
-// These must match the enum in NVPTX.h
-def CmpEQ : PatLeaf<(i32 0)>;
-def CmpNE : PatLeaf<(i32 1)>;
-def CmpLT : PatLeaf<(i32 2)>;
-def CmpLE : PatLeaf<(i32 3)>;
-def CmpGT : PatLeaf<(i32 4)>;
-def CmpGE : PatLeaf<(i32 5)>;
-def CmpEQU : PatLeaf<(i32 10)>;
-def CmpNEU : PatLeaf<(i32 11)>;
-def CmpLTU : PatLeaf<(i32 12)>;
-def CmpLEU : PatLeaf<(i32 13)>;
-def CmpGTU : PatLeaf<(i32 14)>;
-def CmpGEU : PatLeaf<(i32 15)>;
-def CmpNUM : PatLeaf<(i32 16)>;
-def CmpNAN : PatLeaf<(i32 17)>;
-
-def CmpEQ_FTZ : PatLeaf<(i32 0x100)>;
-def CmpNE_FTZ : PatLeaf<(i32 0x101)>;
-def CmpLT_FTZ : PatLeaf<(i32 0x102)>;
-def CmpLE_FTZ : PatLeaf<(i32 0x103)>;
-def CmpGT_FTZ : PatLeaf<(i32 0x104)>;
-def CmpGE_FTZ : PatLeaf<(i32 0x105)>;
-def CmpEQU_FTZ : PatLeaf<(i32 0x10A)>;
-def CmpNEU_FTZ : PatLeaf<(i32 0x10B)>;
-def CmpLTU_FTZ : PatLeaf<(i32 0x10C)>;
-def CmpLEU_FTZ : PatLeaf<(i32 0x10D)>;
-def CmpGTU_FTZ : PatLeaf<(i32 0x10E)>;
-def CmpGEU_FTZ : PatLeaf<(i32 0x10F)>;
-def CmpNUM_FTZ : PatLeaf<(i32 0x110)>;
-def CmpNAN_FTZ : PatLeaf<(i32 0x111)>;
-
-def CmpMode : Operand<i32> {
- let PrintMethod = "printCmpMode";
-}
-def VecElement : Operand<i32> {
- let PrintMethod = "printVecElement";
-}
-
-//===----------------------------------------------------------------------===//
-// NVPTX Instruction Predicate Definitions
-//===----------------------------------------------------------------------===//
-
-
-def hasAtomRedG32 : Predicate<"Subtarget->hasAtomRedG32()">;
-def hasAtomRedS32 : Predicate<"Subtarget->hasAtomRedS32()">;
-def hasAtomRedGen32 : Predicate<"Subtarget->hasAtomRedGen32()">;
-def useAtomRedG32forGen32 :
- Predicate<"!Subtarget->hasAtomRedGen32() && Subtarget->hasAtomRedG32()">;
-def hasBrkPt : Predicate<"Subtarget->hasBrkPt()">;
-def hasAtomRedG64 : Predicate<"Subtarget->hasAtomRedG64()">;
-def hasAtomRedS64 : Predicate<"Subtarget->hasAtomRedS64()">;
-def hasAtomRedGen64 : Predicate<"Subtarget->hasAtomRedGen64()">;
-def useAtomRedG64forGen64 :
- Predicate<"!Subtarget->hasAtomRedGen64() && Subtarget->hasAtomRedG64()">;
-def hasAtomAddF32 : Predicate<"Subtarget->hasAtomAddF32()">;
-def hasAtomAddF64 : Predicate<"Subtarget->hasAtomAddF64()">;
-def hasAtomScope : Predicate<"Subtarget->hasAtomScope()">;
-def hasAtomBitwise64 : Predicate<"Subtarget->hasAtomBitwise64()">;
-def hasAtomMinMax64 : Predicate<"Subtarget->hasAtomMinMax64()">;
-def hasVote : Predicate<"Subtarget->hasVote()">;
-def hasDouble : Predicate<"Subtarget->hasDouble()">;
-def reqPTX20 : Predicate<"Subtarget->reqPTX20()">;
-def hasLDG : Predicate<"Subtarget->hasLDG()">;
-def hasLDU : Predicate<"Subtarget->hasLDU()">;
-def hasGenericLdSt : Predicate<"Subtarget->hasGenericLdSt()">;
-
-def doF32FTZ : Predicate<"useF32FTZ()">;
-def doNoF32FTZ : Predicate<"!useF32FTZ()">;
-
-def doMulWide : Predicate<"doMulWide">;
-
-def allowFMA : Predicate<"allowFMA()">;
-def noFMA : Predicate<"!allowFMA()">;
-def allowUnsafeFPMath : Predicate<"allowUnsafeFPMath()">;
-
-def do_DIVF32_APPROX : Predicate<"getDivF32Level()==0">;
-def do_DIVF32_FULL : Predicate<"getDivF32Level()==1">;
-
-def do_SQRTF32_APPROX : Predicate<"!usePrecSqrtF32()">;
-def do_SQRTF32_RN : Predicate<"usePrecSqrtF32()">;
-
-def hasHWROT32 : Predicate<"Subtarget->hasHWROT32()">;
-def noHWROT32 : Predicate<"!Subtarget->hasHWROT32()">;
-
-def true : Predicate<"true">;
-
-def hasPTX31 : Predicate<"Subtarget->getPTXVersion() >= 31">;
-
-def useFP16Math: Predicate<"Subtarget->allowFP16Math()">;
-
-//===----------------------------------------------------------------------===//
-// Some Common Instruction Class Templates
-//===----------------------------------------------------------------------===//
-
-// Template for instructions which take three int64, int32, or int16 args.
-// The instructions are named "<OpcStr><Width>" (e.g. "add.s64").
-multiclass I3<string OpcStr, SDNode OpNode> {
- def i64rr :
- NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, Int64Regs:$b),
- !strconcat(OpcStr, "64 \t$dst, $a, $b;"),
- [(set Int64Regs:$dst, (OpNode Int64Regs:$a, Int64Regs:$b))]>;
- def i64ri :
- NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, i64imm:$b),
- !strconcat(OpcStr, "64 \t$dst, $a, $b;"),
- [(set Int64Regs:$dst, (OpNode Int64Regs:$a, imm:$b))]>;
- def i32rr :
- NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),
- !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
- [(set Int32Regs:$dst, (OpNode Int32Regs:$a, Int32Regs:$b))]>;
- def i32ri :
- NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
- !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
- [(set Int32Regs:$dst, (OpNode Int32Regs:$a, imm:$b))]>;
- def i16rr :
- NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, Int16Regs:$b),
- !strconcat(OpcStr, "16 \t$dst, $a, $b;"),
- [(set Int16Regs:$dst, (OpNode Int16Regs:$a, Int16Regs:$b))]>;
- def i16ri :
- NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, i16imm:$b),
- !strconcat(OpcStr, "16 \t$dst, $a, $b;"),
- [(set Int16Regs:$dst, (OpNode Int16Regs:$a, (imm):$b))]>;
-}
-
-// Template for instructions which take 3 int32 args. The instructions are
-// named "<OpcStr>.s32" (e.g. "addc.cc.s32").
-multiclass ADD_SUB_INT_32<string OpcStr, SDNode OpNode> {
- def i32rr :
- NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),
- !strconcat(OpcStr, ".s32 \t$dst, $a, $b;"),
- [(set Int32Regs:$dst, (OpNode Int32Regs:$a, Int32Regs:$b))]>;
- def i32ri :
- NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
- !strconcat(OpcStr, ".s32 \t$dst, $a, $b;"),
- [(set Int32Regs:$dst, (OpNode Int32Regs:$a, imm:$b))]>;
-}
-
-// Template for instructions which take three fp64 or fp32 args. The
-// instructions are named "<OpcStr>.f<Width>" (e.g. "min.f64").
-//
-// Also defines ftz (flush subnormal inputs and results to sign-preserving
-// zero) variants for fp32 functions.
-//
-// This multiclass should be used for nodes that cannot be folded into FMAs.
-// For nodes that can be folded into FMAs (i.e. adds and muls), use
-// F3_fma_component.
-multiclass F3<string OpcStr, SDNode OpNode> {
- def f64rr :
- NVPTXInst<(outs Float64Regs:$dst),
- (ins Float64Regs:$a, Float64Regs:$b),
- !strconcat(OpcStr, ".f64 \t$dst, $a, $b;"),
- [(set Float64Regs:$dst, (OpNode Float64Regs:$a, Float64Regs:$b))]>;
- def f64ri :
- NVPTXInst<(outs Float64Regs:$dst),
- (ins Float64Regs:$a, f64imm:$b),
- !strconcat(OpcStr, ".f64 \t$dst, $a, $b;"),
- [(set Float64Regs:$dst, (OpNode Float64Regs:$a, fpimm:$b))]>;
- def f32rr_ftz :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, Float32Regs:$b),
- !strconcat(OpcStr, ".ftz.f32 \t$dst, $a, $b;"),
- [(set Float32Regs:$dst, (OpNode Float32Regs:$a, Float32Regs:$b))]>,
- Requires<[doF32FTZ]>;
- def f32ri_ftz :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, f32imm:$b),
- !strconcat(OpcStr, ".ftz.f32 \t$dst, $a, $b;"),
- [(set Float32Regs:$dst, (OpNode Float32Regs:$a, fpimm:$b))]>,
- Requires<[doF32FTZ]>;
- def f32rr :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, Float32Regs:$b),
- !strconcat(OpcStr, ".f32 \t$dst, $a, $b;"),
- [(set Float32Regs:$dst, (OpNode Float32Regs:$a, Float32Regs:$b))]>;
- def f32ri :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, f32imm:$b),
- !strconcat(OpcStr, ".f32 \t$dst, $a, $b;"),
- [(set Float32Regs:$dst, (OpNode Float32Regs:$a, fpimm:$b))]>;
-}
-
-// Template for instructions which take three FP args. The
-// instructions are named "<OpcStr>.f<Width>" (e.g. "add.f64").
-//
-// Also defines ftz (flush subnormal inputs and results to sign-preserving
-// zero) variants for fp32/fp16 functions.
-//
-// This multiclass should be used for nodes that can be folded to make fma ops.
-// In this case, we use the ".rn" variant when FMA is disabled, as this behaves
-// just like the non ".rn" op, but prevents ptxas from creating FMAs.
-multiclass F3_fma_component<string OpcStr, SDNode OpNode> {
- def f64rr :
- NVPTXInst<(outs Float64Regs:$dst),
- (ins Float64Regs:$a, Float64Regs:$b),
- !strconcat(OpcStr, ".f64 \t$dst, $a, $b;"),
- [(set Float64Regs:$dst, (OpNode Float64Regs:$a, Float64Regs:$b))]>,
- Requires<[allowFMA]>;
- def f64ri :
- NVPTXInst<(outs Float64Regs:$dst),
- (ins Float64Regs:$a, f64imm:$b),
- !strconcat(OpcStr, ".f64 \t$dst, $a, $b;"),
- [(set Float64Regs:$dst, (OpNode Float64Regs:$a, fpimm:$b))]>,
- Requires<[allowFMA]>;
- def f32rr_ftz :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, Float32Regs:$b),
- !strconcat(OpcStr, ".ftz.f32 \t$dst, $a, $b;"),
- [(set Float32Regs:$dst, (OpNode Float32Regs:$a, Float32Regs:$b))]>,
- Requires<[allowFMA, doF32FTZ]>;
- def f32ri_ftz :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, f32imm:$b),
- !strconcat(OpcStr, ".ftz.f32 \t$dst, $a, $b;"),
- [(set Float32Regs:$dst, (OpNode Float32Regs:$a, fpimm:$b))]>,
- Requires<[allowFMA, doF32FTZ]>;
- def f32rr :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, Float32Regs:$b),
- !strconcat(OpcStr, ".f32 \t$dst, $a, $b;"),
- [(set Float32Regs:$dst, (OpNode Float32Regs:$a, Float32Regs:$b))]>,
- Requires<[allowFMA]>;
- def f32ri :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, f32imm:$b),
- !strconcat(OpcStr, ".f32 \t$dst, $a, $b;"),
- [(set Float32Regs:$dst, (OpNode Float32Regs:$a, fpimm:$b))]>,
- Requires<[allowFMA]>;
-
- def f16rr_ftz :
- NVPTXInst<(outs Float16Regs:$dst),
- (ins Float16Regs:$a, Float16Regs:$b),
- !strconcat(OpcStr, ".ftz.f16 \t$dst, $a, $b;"),
- [(set Float16Regs:$dst, (OpNode Float16Regs:$a, Float16Regs:$b))]>,
- Requires<[useFP16Math, allowFMA, doF32FTZ]>;
- def f16rr :
- NVPTXInst<(outs Float16Regs:$dst),
- (ins Float16Regs:$a, Float16Regs:$b),
- !strconcat(OpcStr, ".f16 \t$dst, $a, $b;"),
- [(set Float16Regs:$dst, (OpNode Float16Regs:$a, Float16Regs:$b))]>,
- Requires<[useFP16Math, allowFMA]>;
-
- def f16x2rr_ftz :
- NVPTXInst<(outs Float16x2Regs:$dst),
- (ins Float16x2Regs:$a, Float16x2Regs:$b),
- !strconcat(OpcStr, ".ftz.f16x2 \t$dst, $a, $b;"),
- [(set Float16x2Regs:$dst, (OpNode Float16x2Regs:$a, Float16x2Regs:$b))]>,
- Requires<[useFP16Math, allowFMA, doF32FTZ]>;
- def f16x2rr :
- NVPTXInst<(outs Float16x2Regs:$dst),
- (ins Float16x2Regs:$a, Float16x2Regs:$b),
- !strconcat(OpcStr, ".f16x2 \t$dst, $a, $b;"),
- [(set Float16x2Regs:$dst, (OpNode Float16x2Regs:$a, Float16x2Regs:$b))]>,
- Requires<[useFP16Math, allowFMA]>;
-
- // These have strange names so we don't perturb existing mir tests.
- def _rnf64rr :
- NVPTXInst<(outs Float64Regs:$dst),
- (ins Float64Regs:$a, Float64Regs:$b),
- !strconcat(OpcStr, ".rn.f64 \t$dst, $a, $b;"),
- [(set Float64Regs:$dst, (OpNode Float64Regs:$a, Float64Regs:$b))]>,
- Requires<[noFMA]>;
- def _rnf64ri :
- NVPTXInst<(outs Float64Regs:$dst),
- (ins Float64Regs:$a, f64imm:$b),
- !strconcat(OpcStr, ".rn.f64 \t$dst, $a, $b;"),
- [(set Float64Regs:$dst, (OpNode Float64Regs:$a, fpimm:$b))]>,
- Requires<[noFMA]>;
- def _rnf32rr_ftz :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, Float32Regs:$b),
- !strconcat(OpcStr, ".rn.ftz.f32 \t$dst, $a, $b;"),
- [(set Float32Regs:$dst, (OpNode Float32Regs:$a, Float32Regs:$b))]>,
- Requires<[noFMA, doF32FTZ]>;
- def _rnf32ri_ftz :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, f32imm:$b),
- !strconcat(OpcStr, ".rn.ftz.f32 \t$dst, $a, $b;"),
- [(set Float32Regs:$dst, (OpNode Float32Regs:$a, fpimm:$b))]>,
- Requires<[noFMA, doF32FTZ]>;
- def _rnf32rr :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, Float32Regs:$b),
- !strconcat(OpcStr, ".rn.f32 \t$dst, $a, $b;"),
- [(set Float32Regs:$dst, (OpNode Float32Regs:$a, Float32Regs:$b))]>,
- Requires<[noFMA]>;
- def _rnf32ri :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, f32imm:$b),
- !strconcat(OpcStr, ".rn.f32 \t$dst, $a, $b;"),
- [(set Float32Regs:$dst, (OpNode Float32Regs:$a, fpimm:$b))]>,
- Requires<[noFMA]>;
- def _rnf16rr_ftz :
- NVPTXInst<(outs Float16Regs:$dst),
- (ins Float16Regs:$a, Float16Regs:$b),
- !strconcat(OpcStr, ".rn.ftz.f16 \t$dst, $a, $b;"),
- [(set Float16Regs:$dst, (OpNode Float16Regs:$a, Float16Regs:$b))]>,
- Requires<[useFP16Math, noFMA, doF32FTZ]>;
- def _rnf16rr :
- NVPTXInst<(outs Float16Regs:$dst),
- (ins Float16Regs:$a, Float16Regs:$b),
- !strconcat(OpcStr, ".rn.f16 \t$dst, $a, $b;"),
- [(set Float16Regs:$dst, (OpNode Float16Regs:$a, Float16Regs:$b))]>,
- Requires<[useFP16Math, noFMA]>;
- def _rnf16x2rr_ftz :
- NVPTXInst<(outs Float16x2Regs:$dst),
- (ins Float16x2Regs:$a, Float16x2Regs:$b),
- !strconcat(OpcStr, ".rn.ftz.f16x2 \t$dst, $a, $b;"),
- [(set Float16x2Regs:$dst, (OpNode Float16x2Regs:$a, Float16x2Regs:$b))]>,
- Requires<[useFP16Math, noFMA, doF32FTZ]>;
- def _rnf16x2rr :
- NVPTXInst<(outs Float16x2Regs:$dst),
- (ins Float16x2Regs:$a, Float16x2Regs:$b),
- !strconcat(OpcStr, ".rn.f16x2 \t$dst, $a, $b;"),
- [(set Float16x2Regs:$dst, (OpNode Float16x2Regs:$a, Float16x2Regs:$b))]>,
- Requires<[useFP16Math, noFMA]>;
-}
-
-// Template for operations which take two f32 or f64 operands. Provides three
-// instructions: <OpcStr>.f64, <OpcStr>.f32, and <OpcStr>.ftz.f32 (flush
-// subnormal inputs and results to zero).
-multiclass F2<string OpcStr, SDNode OpNode> {
- def f64 : NVPTXInst<(outs Float64Regs:$dst), (ins Float64Regs:$a),
- !strconcat(OpcStr, ".f64 \t$dst, $a;"),
- [(set Float64Regs:$dst, (OpNode Float64Regs:$a))]>;
- def f32_ftz : NVPTXInst<(outs Float32Regs:$dst), (ins Float32Regs:$a),
- !strconcat(OpcStr, ".ftz.f32 \t$dst, $a;"),
- [(set Float32Regs:$dst, (OpNode Float32Regs:$a))]>,
- Requires<[doF32FTZ]>;
- def f32 : NVPTXInst<(outs Float32Regs:$dst), (ins Float32Regs:$a),
- !strconcat(OpcStr, ".f32 \t$dst, $a;"),
- [(set Float32Regs:$dst, (OpNode Float32Regs:$a))]>;
-}
-
-//===----------------------------------------------------------------------===//
-// NVPTX Instructions.
-//===----------------------------------------------------------------------===//
-
-//-----------------------------------
-// Type Conversion
-//-----------------------------------
-
-let hasSideEffects = 0 in {
- // Generate a cvt to the given type from all possible types. Each instance
- // takes a CvtMode immediate that defines the conversion mode to use. It can
- // be CvtNONE to omit a conversion mode.
- multiclass CVT_FROM_ALL<string FromName, RegisterClass RC> {
- def _s8 :
- NVPTXInst<(outs RC:$dst),
- (ins Int16Regs:$src, CvtMode:$mode),
- !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
- FromName, ".s8 \t$dst, $src;"), []>;
- def _u8 :
- NVPTXInst<(outs RC:$dst),
- (ins Int16Regs:$src, CvtMode:$mode),
- !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
- FromName, ".u8 \t$dst, $src;"), []>;
- def _s16 :
- NVPTXInst<(outs RC:$dst),
- (ins Int16Regs:$src, CvtMode:$mode),
- !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
- FromName, ".s16 \t$dst, $src;"), []>;
- def _u16 :
- NVPTXInst<(outs RC:$dst),
- (ins Int16Regs:$src, CvtMode:$mode),
- !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
- FromName, ".u16 \t$dst, $src;"), []>;
- def _s32 :
- NVPTXInst<(outs RC:$dst),
- (ins Int32Regs:$src, CvtMode:$mode),
- !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
- FromName, ".s32 \t$dst, $src;"), []>;
- def _u32 :
- NVPTXInst<(outs RC:$dst),
- (ins Int32Regs:$src, CvtMode:$mode),
- !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
- FromName, ".u32 \t$dst, $src;"), []>;
- def _s64 :
- NVPTXInst<(outs RC:$dst),
- (ins Int64Regs:$src, CvtMode:$mode),
- !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
- FromName, ".s64 \t$dst, $src;"), []>;
- def _u64 :
- NVPTXInst<(outs RC:$dst),
- (ins Int64Regs:$src, CvtMode:$mode),
- !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
- FromName, ".u64 \t$dst, $src;"), []>;
- def _f16 :
- NVPTXInst<(outs RC:$dst),
- (ins Float16Regs:$src, CvtMode:$mode),
- !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
- FromName, ".f16 \t$dst, $src;"), []>;
- def _f32 :
- NVPTXInst<(outs RC:$dst),
- (ins Float32Regs:$src, CvtMode:$mode),
- !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
- FromName, ".f32 \t$dst, $src;"), []>;
- def _f64 :
- NVPTXInst<(outs RC:$dst),
- (ins Float64Regs:$src, CvtMode:$mode),
- !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
- FromName, ".f64 \t$dst, $src;"), []>;
- }
-
- // Generate cvts from all types to all types.
- defm CVT_s8 : CVT_FROM_ALL<"s8", Int16Regs>;
- defm CVT_u8 : CVT_FROM_ALL<"u8", Int16Regs>;
- defm CVT_s16 : CVT_FROM_ALL<"s16", Int16Regs>;
- defm CVT_u16 : CVT_FROM_ALL<"u16", Int16Regs>;
- defm CVT_s32 : CVT_FROM_ALL<"s32", Int32Regs>;
- defm CVT_u32 : CVT_FROM_ALL<"u32", Int32Regs>;
- defm CVT_s64 : CVT_FROM_ALL<"s64", Int64Regs>;
- defm CVT_u64 : CVT_FROM_ALL<"u64", Int64Regs>;
- defm CVT_f16 : CVT_FROM_ALL<"f16", Float16Regs>;
- defm CVT_f32 : CVT_FROM_ALL<"f32", Float32Regs>;
- defm CVT_f64 : CVT_FROM_ALL<"f64", Float64Regs>;
-
- // These cvts are different from those above: The source and dest registers
- // are of the same type.
- def CVT_INREG_s16_s8 : NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$src),
- "cvt.s16.s8 \t$dst, $src;", []>;
- def CVT_INREG_s32_s8 : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src),
- "cvt.s32.s8 \t$dst, $src;", []>;
- def CVT_INREG_s32_s16 : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src),
- "cvt.s32.s16 \t$dst, $src;", []>;
- def CVT_INREG_s64_s8 : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src),
- "cvt.s64.s8 \t$dst, $src;", []>;
- def CVT_INREG_s64_s16 : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src),
- "cvt.s64.s16 \t$dst, $src;", []>;
- def CVT_INREG_s64_s32 : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src),
- "cvt.s64.s32 \t$dst, $src;", []>;
-}
-
-//-----------------------------------
-// Integer Arithmetic
-//-----------------------------------
-
-// Template for xor masquerading as int1 arithmetic.
-multiclass ADD_SUB_i1<SDNode OpNode> {
- def _rr: NVPTXInst<(outs Int1Regs:$dst), (ins Int1Regs:$a, Int1Regs:$b),
- "xor.pred \t$dst, $a, $b;",
- [(set Int1Regs:$dst, (OpNode Int1Regs:$a, Int1Regs:$b))]>;
- def _ri: NVPTXInst<(outs Int1Regs:$dst), (ins Int1Regs:$a, i1imm:$b),
- "xor.pred \t$dst, $a, $b;",
- [(set Int1Regs:$dst, (OpNode Int1Regs:$a, (imm):$b))]>;
-}
-
-// int1 addition and subtraction are both just xor.
-defm ADD_i1 : ADD_SUB_i1<add>;
-defm SUB_i1 : ADD_SUB_i1<sub>;
-
-// int16, int32, and int64 signed addition. Since nvptx is 2's complement, we
-// also use these for unsigned arithmetic.
-defm ADD : I3<"add.s", add>;
-defm SUB : I3<"sub.s", sub>;
-
-// int32 addition and subtraction with carry-out.
-// FIXME: PTX 4.3 adds a 64-bit add.cc (and maybe also 64-bit addc.cc?).
-defm ADDCC : ADD_SUB_INT_32<"add.cc", addc>;
-defm SUBCC : ADD_SUB_INT_32<"sub.cc", subc>;
-
-// int32 addition and subtraction with carry-in and carry-out.
-defm ADDCCC : ADD_SUB_INT_32<"addc.cc", adde>;
-defm SUBCCC : ADD_SUB_INT_32<"subc.cc", sube>;
-
-defm MULT : I3<"mul.lo.s", mul>;
-
-defm MULTHS : I3<"mul.hi.s", mulhs>;
-defm MULTHU : I3<"mul.hi.u", mulhu>;
-
-defm SDIV : I3<"div.s", sdiv>;
-defm UDIV : I3<"div.u", udiv>;
-
-// The ri versions of rem.s and rem.u won't be selected; DAGCombiner::visitSREM
-// will lower it.
-defm SREM : I3<"rem.s", srem>;
-defm UREM : I3<"rem.u", urem>;
-
-// Integer absolute value. NumBits should be one minus the bit width of RC.
-// This idiom implements the algorithm at
-// http://graphics.stanford.edu/~seander/bithacks.html#IntegerAbs.
-multiclass ABS<RegisterClass RC, int NumBits, string SizeName> {
- def : NVPTXInst<(outs RC:$dst), (ins RC:$a),
- !strconcat("abs", SizeName, " \t$dst, $a;"),
- [(set RC:$dst, (xor (add (sra RC:$a, (i32 NumBits)), RC:$a),
- (sra RC:$a, (i32 NumBits))))]>;
-}
-defm ABS_16 : ABS<Int16Regs, 15, ".s16">;
-defm ABS_32 : ABS<Int32Regs, 31, ".s32">;
-defm ABS_64 : ABS<Int64Regs, 63, ".s64">;
-
-// Integer min/max.
-defm SMAX : I3<"max.s", smax>;
-defm UMAX : I3<"max.u", umax>;
-defm SMIN : I3<"min.s", smin>;
-defm UMIN : I3<"min.u", umin>;
-
-//
-// Wide multiplication
-//
-def MULWIDES64 :
- NVPTXInst<(outs Int64Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),
- "mul.wide.s32 \t$dst, $a, $b;", []>;
-def MULWIDES64Imm :
- NVPTXInst<(outs Int64Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
- "mul.wide.s32 \t$dst, $a, $b;", []>;
-def MULWIDES64Imm64 :
- NVPTXInst<(outs Int64Regs:$dst), (ins Int32Regs:$a, i64imm:$b),
- "mul.wide.s32 \t$dst, $a, $b;", []>;
-
-def MULWIDEU64 :
- NVPTXInst<(outs Int64Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),
- "mul.wide.u32 \t$dst, $a, $b;", []>;
-def MULWIDEU64Imm :
- NVPTXInst<(outs Int64Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
- "mul.wide.u32 \t$dst, $a, $b;", []>;
-def MULWIDEU64Imm64 :
- NVPTXInst<(outs Int64Regs:$dst), (ins Int32Regs:$a, i64imm:$b),
- "mul.wide.u32 \t$dst, $a, $b;", []>;
-
-def MULWIDES32 :
- NVPTXInst<(outs Int32Regs:$dst), (ins Int16Regs:$a, Int16Regs:$b),
- "mul.wide.s16 \t$dst, $a, $b;", []>;
-def MULWIDES32Imm :
- NVPTXInst<(outs Int32Regs:$dst), (ins Int16Regs:$a, i16imm:$b),
- "mul.wide.s16 \t$dst, $a, $b;", []>;
-def MULWIDES32Imm32 :
- NVPTXInst<(outs Int32Regs:$dst), (ins Int16Regs:$a, i32imm:$b),
- "mul.wide.s16 \t$dst, $a, $b;", []>;
-
-def MULWIDEU32 :
- NVPTXInst<(outs Int32Regs:$dst), (ins Int16Regs:$a, Int16Regs:$b),
- "mul.wide.u16 \t$dst, $a, $b;", []>;
-def MULWIDEU32Imm :
- NVPTXInst<(outs Int32Regs:$dst), (ins Int16Regs:$a, i16imm:$b),
- "mul.wide.u16 \t$dst, $a, $b;", []>;
-def MULWIDEU32Imm32 :
- NVPTXInst<(outs Int32Regs:$dst), (ins Int16Regs:$a, i32imm:$b),
- "mul.wide.u16 \t$dst, $a, $b;", []>;
-
-def SDTMulWide : SDTypeProfile<1, 2, [SDTCisSameAs<1, 2>]>;
-def mul_wide_signed : SDNode<"NVPTXISD::MUL_WIDE_SIGNED", SDTMulWide>;
-def mul_wide_unsigned : SDNode<"NVPTXISD::MUL_WIDE_UNSIGNED", SDTMulWide>;
-
-// Matchers for signed, unsigned mul.wide ISD nodes.
-def : Pat<(i32 (mul_wide_signed Int16Regs:$a, Int16Regs:$b)),
- (MULWIDES32 Int16Regs:$a, Int16Regs:$b)>,
- Requires<[doMulWide]>;
-def : Pat<(i32 (mul_wide_signed Int16Regs:$a, imm:$b)),
- (MULWIDES32Imm Int16Regs:$a, imm:$b)>,
- Requires<[doMulWide]>;
-def : Pat<(i32 (mul_wide_unsigned Int16Regs:$a, Int16Regs:$b)),
- (MULWIDEU32 Int16Regs:$a, Int16Regs:$b)>,
- Requires<[doMulWide]>;
-def : Pat<(i32 (mul_wide_unsigned Int16Regs:$a, imm:$b)),
- (MULWIDEU32Imm Int16Regs:$a, imm:$b)>,
- Requires<[doMulWide]>;
-
-def : Pat<(i64 (mul_wide_signed Int32Regs:$a, Int32Regs:$b)),
- (MULWIDES64 Int32Regs:$a, Int32Regs:$b)>,
- Requires<[doMulWide]>;
-def : Pat<(i64 (mul_wide_signed Int32Regs:$a, imm:$b)),
- (MULWIDES64Imm Int32Regs:$a, imm:$b)>,
- Requires<[doMulWide]>;
-def : Pat<(i64 (mul_wide_unsigned Int32Regs:$a, Int32Regs:$b)),
- (MULWIDEU64 Int32Regs:$a, Int32Regs:$b)>,
- Requires<[doMulWide]>;
-def : Pat<(i64 (mul_wide_unsigned Int32Regs:$a, imm:$b)),
- (MULWIDEU64Imm Int32Regs:$a, imm:$b)>,
- Requires<[doMulWide]>;
-
-// Predicates used for converting some patterns to mul.wide.
-def SInt32Const : PatLeaf<(imm), [{
- const APInt &v = N->getAPIntValue();
- return v.isSignedIntN(32);
-}]>;
-
-def UInt32Const : PatLeaf<(imm), [{
- const APInt &v = N->getAPIntValue();
- return v.isIntN(32);
-}]>;
-
-def SInt16Const : PatLeaf<(imm), [{
- const APInt &v = N->getAPIntValue();
- return v.isSignedIntN(16);
-}]>;
-
-def UInt16Const : PatLeaf<(imm), [{
- const APInt &v = N->getAPIntValue();
- return v.isIntN(16);
-}]>;
-
-def Int5Const : PatLeaf<(imm), [{
- // Check if 0 <= v < 32; only then will the result of (x << v) be an int32.
- const APInt &v = N->getAPIntValue();
- return v.sge(0) && v.slt(32);
-}]>;
-
-def Int4Const : PatLeaf<(imm), [{
- // Check if 0 <= v < 16; only then will the result of (x << v) be an int16.
- const APInt &v = N->getAPIntValue();
- return v.sge(0) && v.slt(16);
-}]>;
-
-def SHL2MUL32 : SDNodeXForm<imm, [{
- const APInt &v = N->getAPIntValue();
- APInt temp(32, 1);
- return CurDAG->getTargetConstant(temp.shl(v), SDLoc(N), MVT::i32);
-}]>;
-
-def SHL2MUL16 : SDNodeXForm<imm, [{
- const APInt &v = N->getAPIntValue();
- APInt temp(16, 1);
- return CurDAG->getTargetConstant(temp.shl(v), SDLoc(N), MVT::i16);
-}]>;
-
-// Convert "sign/zero-extend, then shift left by an immediate" to mul.wide.
-def : Pat<(shl (sext Int32Regs:$a), (i32 Int5Const:$b)),
- (MULWIDES64Imm Int32Regs:$a, (SHL2MUL32 node:$b))>,
- Requires<[doMulWide]>;
-def : Pat<(shl (zext Int32Regs:$a), (i32 Int5Const:$b)),
- (MULWIDEU64Imm Int32Regs:$a, (SHL2MUL32 node:$b))>,
- Requires<[doMulWide]>;
-
-def : Pat<(shl (sext Int16Regs:$a), (i16 Int4Const:$b)),
- (MULWIDES32Imm Int16Regs:$a, (SHL2MUL16 node:$b))>,
- Requires<[doMulWide]>;
-def : Pat<(shl (zext Int16Regs:$a), (i16 Int4Const:$b)),
- (MULWIDEU32Imm Int16Regs:$a, (SHL2MUL16 node:$b))>,
- Requires<[doMulWide]>;
-
-// Convert "sign/zero-extend then multiply" to mul.wide.
-def : Pat<(mul (sext Int32Regs:$a), (sext Int32Regs:$b)),
- (MULWIDES64 Int32Regs:$a, Int32Regs:$b)>,
- Requires<[doMulWide]>;
-def : Pat<(mul (sext Int32Regs:$a), (i64 SInt32Const:$b)),
- (MULWIDES64Imm64 Int32Regs:$a, (i64 SInt32Const:$b))>,
- Requires<[doMulWide]>;
-
-def : Pat<(mul (zext Int32Regs:$a), (zext Int32Regs:$b)),
- (MULWIDEU64 Int32Regs:$a, Int32Regs:$b)>,
- Requires<[doMulWide]>;
-def : Pat<(mul (zext Int32Regs:$a), (i64 UInt32Const:$b)),
- (MULWIDEU64Imm64 Int32Regs:$a, (i64 UInt32Const:$b))>,
- Requires<[doMulWide]>;
-
-def : Pat<(mul (sext Int16Regs:$a), (sext Int16Regs:$b)),
- (MULWIDES32 Int16Regs:$a, Int16Regs:$b)>,
- Requires<[doMulWide]>;
-def : Pat<(mul (sext Int16Regs:$a), (i32 SInt16Const:$b)),
- (MULWIDES32Imm32 Int16Regs:$a, (i32 SInt16Const:$b))>,
- Requires<[doMulWide]>;
-
-def : Pat<(mul (zext Int16Regs:$a), (zext Int16Regs:$b)),
- (MULWIDEU32 Int16Regs:$a, Int16Regs:$b)>,
- Requires<[doMulWide]>;
-def : Pat<(mul (zext Int16Regs:$a), (i32 UInt16Const:$b)),
- (MULWIDEU32Imm32 Int16Regs:$a, (i32 UInt16Const:$b))>,
- Requires<[doMulWide]>;
-
-//
-// Integer multiply-add
-//
-def SDTIMAD :
- SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, SDTCisInt<0>, SDTCisInt<2>,
- SDTCisSameAs<0, 2>, SDTCisSameAs<0, 3>]>;
-def imad : SDNode<"NVPTXISD::IMAD", SDTIMAD>;
-
-def MAD16rrr :
- NVPTXInst<(outs Int16Regs:$dst),
- (ins Int16Regs:$a, Int16Regs:$b, Int16Regs:$c),
- "mad.lo.s16 \t$dst, $a, $b, $c;",
- [(set Int16Regs:$dst, (imad Int16Regs:$a, Int16Regs:$b, Int16Regs:$c))]>;
-def MAD16rri :
- NVPTXInst<(outs Int16Regs:$dst),
- (ins Int16Regs:$a, Int16Regs:$b, i16imm:$c),
- "mad.lo.s16 \t$dst, $a, $b, $c;",
- [(set Int16Regs:$dst, (imad Int16Regs:$a, Int16Regs:$b, imm:$c))]>;
-def MAD16rir :
- NVPTXInst<(outs Int16Regs:$dst),
- (ins Int16Regs:$a, i16imm:$b, Int16Regs:$c),
- "mad.lo.s16 \t$dst, $a, $b, $c;",
- [(set Int16Regs:$dst, (imad Int16Regs:$a, imm:$b, Int16Regs:$c))]>;
-def MAD16rii :
- NVPTXInst<(outs Int16Regs:$dst),
- (ins Int16Regs:$a, i16imm:$b, i16imm:$c),
- "mad.lo.s16 \t$dst, $a, $b, $c;",
- [(set Int16Regs:$dst, (imad Int16Regs:$a, imm:$b, imm:$c))]>;
-
-def MAD32rrr :
- NVPTXInst<(outs Int32Regs:$dst),
- (ins Int32Regs:$a, Int32Regs:$b, Int32Regs:$c),
- "mad.lo.s32 \t$dst, $a, $b, $c;",
- [(set Int32Regs:$dst, (imad Int32Regs:$a, Int32Regs:$b, Int32Regs:$c))]>;
-def MAD32rri :
- NVPTXInst<(outs Int32Regs:$dst),
- (ins Int32Regs:$a, Int32Regs:$b, i32imm:$c),
- "mad.lo.s32 \t$dst, $a, $b, $c;",
- [(set Int32Regs:$dst, (imad Int32Regs:$a, Int32Regs:$b, imm:$c))]>;
-def MAD32rir :
- NVPTXInst<(outs Int32Regs:$dst),
- (ins Int32Regs:$a, i32imm:$b, Int32Regs:$c),
- "mad.lo.s32 \t$dst, $a, $b, $c;",
- [(set Int32Regs:$dst, (imad Int32Regs:$a, imm:$b, Int32Regs:$c))]>;
-def MAD32rii :
- NVPTXInst<(outs Int32Regs:$dst),
- (ins Int32Regs:$a, i32imm:$b, i32imm:$c),
- "mad.lo.s32 \t$dst, $a, $b, $c;",
- [(set Int32Regs:$dst, (imad Int32Regs:$a, imm:$b, imm:$c))]>;
-
-def MAD64rrr :
- NVPTXInst<(outs Int64Regs:$dst),
- (ins Int64Regs:$a, Int64Regs:$b, Int64Regs:$c),
- "mad.lo.s64 \t$dst, $a, $b, $c;",
- [(set Int64Regs:$dst, (imad Int64Regs:$a, Int64Regs:$b, Int64Regs:$c))]>;
-def MAD64rri :
- NVPTXInst<(outs Int64Regs:$dst),
- (ins Int64Regs:$a, Int64Regs:$b, i64imm:$c),
- "mad.lo.s64 \t$dst, $a, $b, $c;",
- [(set Int64Regs:$dst, (imad Int64Regs:$a, Int64Regs:$b, imm:$c))]>;
-def MAD64rir :
- NVPTXInst<(outs Int64Regs:$dst),
- (ins Int64Regs:$a, i64imm:$b, Int64Regs:$c),
- "mad.lo.s64 \t$dst, $a, $b, $c;",
- [(set Int64Regs:$dst, (imad Int64Regs:$a, imm:$b, Int64Regs:$c))]>;
-def MAD64rii :
- NVPTXInst<(outs Int64Regs:$dst),
- (ins Int64Regs:$a, i64imm:$b, i64imm:$c),
- "mad.lo.s64 \t$dst, $a, $b, $c;",
- [(set Int64Regs:$dst, (imad Int64Regs:$a, imm:$b, imm:$c))]>;
-
-def INEG16 :
- NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$src),
- "neg.s16 \t$dst, $src;",
- [(set Int16Regs:$dst, (ineg Int16Regs:$src))]>;
-def INEG32 :
- NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src),
- "neg.s32 \t$dst, $src;",
- [(set Int32Regs:$dst, (ineg Int32Regs:$src))]>;
-def INEG64 :
- NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src),
- "neg.s64 \t$dst, $src;",
- [(set Int64Regs:$dst, (ineg Int64Regs:$src))]>;
-
-//-----------------------------------
-// Floating Point Arithmetic
-//-----------------------------------
-
-// Constant 1.0f
-def FloatConst1 : PatLeaf<(fpimm), [{
- return &N->getValueAPF().getSemantics() == &llvm::APFloat::IEEEsingle() &&
- N->getValueAPF().convertToFloat() == 1.0f;
-}]>;
-// Constant 1.0 (double)
-def DoubleConst1 : PatLeaf<(fpimm), [{
- return &N->getValueAPF().getSemantics() == &llvm::APFloat::IEEEdouble() &&
- N->getValueAPF().convertToDouble() == 1.0;
-}]>;
-
-// Loads FP16 constant into a register.
-//
-// ptxas does not have hex representation for fp16, so we can't use
-// fp16 immediate values in .f16 instructions. Instead we have to load
-// the constant into a register using mov.b16.
-def LOAD_CONST_F16 :
- NVPTXInst<(outs Float16Regs:$dst), (ins f16imm:$a),
- "mov.b16 \t$dst, $a;", []>;
-
-defm FADD : F3_fma_component<"add", fadd>;
-defm FSUB : F3_fma_component<"sub", fsub>;
-defm FMUL : F3_fma_component<"mul", fmul>;
-
-defm FMIN : F3<"min", fminnum>;
-defm FMAX : F3<"max", fmaxnum>;
-
-defm FABS : F2<"abs", fabs>;
-defm FNEG : F2<"neg", fneg>;
-defm FSQRT : F2<"sqrt.rn", fsqrt>;
-
-//
-// F64 division
-//
-def FDIV641r :
- NVPTXInst<(outs Float64Regs:$dst),
- (ins f64imm:$a, Float64Regs:$b),
- "rcp.rn.f64 \t$dst, $b;",
- [(set Float64Regs:$dst, (fdiv DoubleConst1:$a, Float64Regs:$b))]>;
-def FDIV64rr :
- NVPTXInst<(outs Float64Regs:$dst),
- (ins Float64Regs:$a, Float64Regs:$b),
- "div.rn.f64 \t$dst, $a, $b;",
- [(set Float64Regs:$dst, (fdiv Float64Regs:$a, Float64Regs:$b))]>;
-def FDIV64ri :
- NVPTXInst<(outs Float64Regs:$dst),
- (ins Float64Regs:$a, f64imm:$b),
- "div.rn.f64 \t$dst, $a, $b;",
- [(set Float64Regs:$dst, (fdiv Float64Regs:$a, fpimm:$b))]>;
-
-//
-// F32 Approximate reciprocal
-//
-def FDIV321r_ftz :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins f32imm:$a, Float32Regs:$b),
- "rcp.approx.ftz.f32 \t$dst, $b;",
- [(set Float32Regs:$dst, (fdiv FloatConst1:$a, Float32Regs:$b))]>,
- Requires<[do_DIVF32_APPROX, doF32FTZ]>;
-def FDIV321r :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins f32imm:$a, Float32Regs:$b),
- "rcp.approx.f32 \t$dst, $b;",
- [(set Float32Regs:$dst, (fdiv FloatConst1:$a, Float32Regs:$b))]>,
- Requires<[do_DIVF32_APPROX]>;
-//
-// F32 Approximate division
-//
-def FDIV32approxrr_ftz :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, Float32Regs:$b),
- "div.approx.ftz.f32 \t$dst, $a, $b;",
- [(set Float32Regs:$dst, (fdiv Float32Regs:$a, Float32Regs:$b))]>,
- Requires<[do_DIVF32_APPROX, doF32FTZ]>;
-def FDIV32approxri_ftz :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, f32imm:$b),
- "div.approx.ftz.f32 \t$dst, $a, $b;",
- [(set Float32Regs:$dst, (fdiv Float32Regs:$a, fpimm:$b))]>,
- Requires<[do_DIVF32_APPROX, doF32FTZ]>;
-def FDIV32approxrr :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, Float32Regs:$b),
- "div.approx.f32 \t$dst, $a, $b;",
- [(set Float32Regs:$dst, (fdiv Float32Regs:$a, Float32Regs:$b))]>,
- Requires<[do_DIVF32_APPROX]>;
-def FDIV32approxri :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, f32imm:$b),
- "div.approx.f32 \t$dst, $a, $b;",
- [(set Float32Regs:$dst, (fdiv Float32Regs:$a, fpimm:$b))]>,
- Requires<[do_DIVF32_APPROX]>;
-//
-// F32 Semi-accurate reciprocal
-//
-// rcp.approx gives the same result as div.full(1.0f, a) and is faster.
-//
-def FDIV321r_approx_ftz :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins f32imm:$a, Float32Regs:$b),
- "rcp.approx.ftz.f32 \t$dst, $b;",
- [(set Float32Regs:$dst, (fdiv FloatConst1:$a, Float32Regs:$b))]>,
- Requires<[do_DIVF32_FULL, doF32FTZ]>;
-def FDIV321r_approx :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins f32imm:$a, Float32Regs:$b),
- "rcp.approx.f32 \t$dst, $b;",
- [(set Float32Regs:$dst, (fdiv FloatConst1:$a, Float32Regs:$b))]>,
- Requires<[do_DIVF32_FULL]>;
-//
-// F32 Semi-accurate division
-//
-def FDIV32rr_ftz :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, Float32Regs:$b),
- "div.full.ftz.f32 \t$dst, $a, $b;",
- [(set Float32Regs:$dst, (fdiv Float32Regs:$a, Float32Regs:$b))]>,
- Requires<[do_DIVF32_FULL, doF32FTZ]>;
-def FDIV32ri_ftz :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, f32imm:$b),
- "div.full.ftz.f32 \t$dst, $a, $b;",
- [(set Float32Regs:$dst, (fdiv Float32Regs:$a, fpimm:$b))]>,
- Requires<[do_DIVF32_FULL, doF32FTZ]>;
-def FDIV32rr :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, Float32Regs:$b),
- "div.full.f32 \t$dst, $a, $b;",
- [(set Float32Regs:$dst, (fdiv Float32Regs:$a, Float32Regs:$b))]>,
- Requires<[do_DIVF32_FULL]>;
-def FDIV32ri :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, f32imm:$b),
- "div.full.f32 \t$dst, $a, $b;",
- [(set Float32Regs:$dst, (fdiv Float32Regs:$a, fpimm:$b))]>,
- Requires<[do_DIVF32_FULL]>;
-//
-// F32 Accurate reciprocal
-//
-def FDIV321r_prec_ftz :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins f32imm:$a, Float32Regs:$b),
- "rcp.rn.ftz.f32 \t$dst, $b;",
- [(set Float32Regs:$dst, (fdiv FloatConst1:$a, Float32Regs:$b))]>,
- Requires<[reqPTX20, doF32FTZ]>;
-def FDIV321r_prec :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins f32imm:$a, Float32Regs:$b),
- "rcp.rn.f32 \t$dst, $b;",
- [(set Float32Regs:$dst, (fdiv FloatConst1:$a, Float32Regs:$b))]>,
- Requires<[reqPTX20]>;
-//
-// F32 Accurate division
-//
-def FDIV32rr_prec_ftz :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, Float32Regs:$b),
- "div.rn.ftz.f32 \t$dst, $a, $b;",
- [(set Float32Regs:$dst, (fdiv Float32Regs:$a, Float32Regs:$b))]>,
- Requires<[doF32FTZ, reqPTX20]>;
-def FDIV32ri_prec_ftz :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, f32imm:$b),
- "div.rn.ftz.f32 \t$dst, $a, $b;",
- [(set Float32Regs:$dst, (fdiv Float32Regs:$a, fpimm:$b))]>,
- Requires<[doF32FTZ, reqPTX20]>;
-def FDIV32rr_prec :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, Float32Regs:$b),
- "div.rn.f32 \t$dst, $a, $b;",
- [(set Float32Regs:$dst, (fdiv Float32Regs:$a, Float32Regs:$b))]>,
- Requires<[reqPTX20]>;
-def FDIV32ri_prec :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, f32imm:$b),
- "div.rn.f32 \t$dst, $a, $b;",
- [(set Float32Regs:$dst, (fdiv Float32Regs:$a, fpimm:$b))]>,
- Requires<[reqPTX20]>;
-
-//
-// FMA
-//
-
-multiclass FMA<string OpcStr, RegisterClass RC, Operand ImmCls, Predicate Pred> {
- def rrr : NVPTXInst<(outs RC:$dst), (ins RC:$a, RC:$b, RC:$c),
- !strconcat(OpcStr, " \t$dst, $a, $b, $c;"),
- [(set RC:$dst, (fma RC:$a, RC:$b, RC:$c))]>,
- Requires<[Pred]>;
- def rri : NVPTXInst<(outs RC:$dst),
- (ins RC:$a, RC:$b, ImmCls:$c),
- !strconcat(OpcStr, " \t$dst, $a, $b, $c;"),
- [(set RC:$dst, (fma RC:$a, RC:$b, fpimm:$c))]>,
- Requires<[Pred]>;
- def rir : NVPTXInst<(outs RC:$dst),
- (ins RC:$a, ImmCls:$b, RC:$c),
- !strconcat(OpcStr, " \t$dst, $a, $b, $c;"),
- [(set RC:$dst, (fma RC:$a, fpimm:$b, RC:$c))]>,
- Requires<[Pred]>;
- def rii : NVPTXInst<(outs RC:$dst),
- (ins RC:$a, ImmCls:$b, ImmCls:$c),
- !strconcat(OpcStr, " \t$dst, $a, $b, $c;"),
- [(set RC:$dst, (fma RC:$a, fpimm:$b, fpimm:$c))]>,
- Requires<[Pred]>;
-}
-
-multiclass FMA_F16<string OpcStr, RegisterClass RC, Predicate Pred> {
- def rrr : NVPTXInst<(outs RC:$dst), (ins RC:$a, RC:$b, RC:$c),
- !strconcat(OpcStr, " \t$dst, $a, $b, $c;"),
- [(set RC:$dst, (fma RC:$a, RC:$b, RC:$c))]>,
- Requires<[useFP16Math, Pred]>;
-}
-
-defm FMA16_ftz : FMA_F16<"fma.rn.ftz.f16", Float16Regs, doF32FTZ>;
-defm FMA16 : FMA_F16<"fma.rn.f16", Float16Regs, true>;
-defm FMA16x2_ftz : FMA_F16<"fma.rn.ftz.f16x2", Float16x2Regs, doF32FTZ>;
-defm FMA16x2 : FMA_F16<"fma.rn.f16x2", Float16x2Regs, true>;
-defm FMA32_ftz : FMA<"fma.rn.ftz.f32", Float32Regs, f32imm, doF32FTZ>;
-defm FMA32 : FMA<"fma.rn.f32", Float32Regs, f32imm, true>;
-defm FMA64 : FMA<"fma.rn.f64", Float64Regs, f64imm, true>;
-
-// sin/cos
-def SINF: NVPTXInst<(outs Float32Regs:$dst), (ins Float32Regs:$src),
- "sin.approx.f32 \t$dst, $src;",
- [(set Float32Regs:$dst, (fsin Float32Regs:$src))]>,
- Requires<[allowUnsafeFPMath]>;
-def COSF: NVPTXInst<(outs Float32Regs:$dst), (ins Float32Regs:$src),
- "cos.approx.f32 \t$dst, $src;",
- [(set Float32Regs:$dst, (fcos Float32Regs:$src))]>,
- Requires<[allowUnsafeFPMath]>;
-
-// Lower (frem x, y) into (sub x, (mul (floor (div x, y)) y)),
-// i.e. "poor man's fmod()"
-
-// frem - f32 FTZ
-def : Pat<(frem Float32Regs:$x, Float32Regs:$y),
- (FSUBf32rr_ftz Float32Regs:$x, (FMULf32rr_ftz (CVT_f32_f32
- (FDIV32rr_prec_ftz Float32Regs:$x, Float32Regs:$y), CvtRMI_FTZ),
- Float32Regs:$y))>,
- Requires<[doF32FTZ]>;
-def : Pat<(frem Float32Regs:$x, fpimm:$y),
- (FSUBf32rr_ftz Float32Regs:$x, (FMULf32ri_ftz (CVT_f32_f32
- (FDIV32ri_prec_ftz Float32Regs:$x, fpimm:$y), CvtRMI_FTZ),
- fpimm:$y))>,
- Requires<[doF32FTZ]>;
-
-// frem - f32
-def : Pat<(frem Float32Regs:$x, Float32Regs:$y),
- (FSUBf32rr Float32Regs:$x, (FMULf32rr (CVT_f32_f32
- (FDIV32rr_prec Float32Regs:$x, Float32Regs:$y), CvtRMI),
- Float32Regs:$y))>;
-def : Pat<(frem Float32Regs:$x, fpimm:$y),
- (FSUBf32rr Float32Regs:$x, (FMULf32ri (CVT_f32_f32
- (FDIV32ri_prec Float32Regs:$x, fpimm:$y), CvtRMI),
- fpimm:$y))>;
-
-// frem - f64
-def : Pat<(frem Float64Regs:$x, Float64Regs:$y),
- (FSUBf64rr Float64Regs:$x, (FMULf64rr (CVT_f64_f64
- (FDIV64rr Float64Regs:$x, Float64Regs:$y), CvtRMI),
- Float64Regs:$y))>;
-def : Pat<(frem Float64Regs:$x, fpimm:$y),
- (FSUBf64rr Float64Regs:$x, (FMULf64ri (CVT_f64_f64
- (FDIV64ri Float64Regs:$x, fpimm:$y), CvtRMI),
- fpimm:$y))>;
-
-//-----------------------------------
-// Bitwise operations
-//-----------------------------------
-
-// Template for three-arg bitwise operations. Takes three args, Creates .b16,
-// .b32, .b64, and .pred (predicate registers -- i.e., i1) versions of OpcStr.
-multiclass BITWISE<string OpcStr, SDNode OpNode> {
- def b1rr :
- NVPTXInst<(outs Int1Regs:$dst), (ins Int1Regs:$a, Int1Regs:$b),
- !strconcat(OpcStr, ".pred \t$dst, $a, $b;"),
- [(set Int1Regs:$dst, (OpNode Int1Regs:$a, Int1Regs:$b))]>;
- def b1ri :
- NVPTXInst<(outs Int1Regs:$dst), (ins Int1Regs:$a, i1imm:$b),
- !strconcat(OpcStr, ".pred \t$dst, $a, $b;"),
- [(set Int1Regs:$dst, (OpNode Int1Regs:$a, imm:$b))]>;
- def b16rr :
- NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, Int16Regs:$b),
- !strconcat(OpcStr, ".b16 \t$dst, $a, $b;"),
- [(set Int16Regs:$dst, (OpNode Int16Regs:$a, Int16Regs:$b))]>;
- def b16ri :
- NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, i16imm:$b),
- !strconcat(OpcStr, ".b16 \t$dst, $a, $b;"),
- [(set Int16Regs:$dst, (OpNode Int16Regs:$a, imm:$b))]>;
- def b32rr :
- NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),
- !strconcat(OpcStr, ".b32 \t$dst, $a, $b;"),
- [(set Int32Regs:$dst, (OpNode Int32Regs:$a, Int32Regs:$b))]>;
- def b32ri :
- NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
- !strconcat(OpcStr, ".b32 \t$dst, $a, $b;"),
- [(set Int32Regs:$dst, (OpNode Int32Regs:$a, imm:$b))]>;
- def b64rr :
- NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, Int64Regs:$b),
- !strconcat(OpcStr, ".b64 \t$dst, $a, $b;"),
- [(set Int64Regs:$dst, (OpNode Int64Regs:$a, Int64Regs:$b))]>;
- def b64ri :
- NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, i64imm:$b),
- !strconcat(OpcStr, ".b64 \t$dst, $a, $b;"),
- [(set Int64Regs:$dst, (OpNode Int64Regs:$a, imm:$b))]>;
-}
-
-defm OR : BITWISE<"or", or>;
-defm AND : BITWISE<"and", and>;
-defm XOR : BITWISE<"xor", xor>;
-
-def NOT1 : NVPTXInst<(outs Int1Regs:$dst), (ins Int1Regs:$src),
- "not.pred \t$dst, $src;",
- [(set Int1Regs:$dst, (not Int1Regs:$src))]>;
-def NOT16 : NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$src),
- "not.b16 \t$dst, $src;",
- [(set Int16Regs:$dst, (not Int16Regs:$src))]>;
-def NOT32 : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src),
- "not.b32 \t$dst, $src;",
- [(set Int32Regs:$dst, (not Int32Regs:$src))]>;
-def NOT64 : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src),
- "not.b64 \t$dst, $src;",
- [(set Int64Regs:$dst, (not Int64Regs:$src))]>;
-
-// Template for left/right shifts. Takes three operands,
-// [dest (reg), src (reg), shift (reg or imm)].
-// dest and src may be int64, int32, or int16, but shift is always int32.
-//
-// This template also defines a 32-bit shift (imm, imm) instruction.
-multiclass SHIFT<string OpcStr, SDNode OpNode> {
- def i64rr :
- NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, Int32Regs:$b),
- !strconcat(OpcStr, "64 \t$dst, $a, $b;"),
- [(set Int64Regs:$dst, (OpNode Int64Regs:$a, Int32Regs:$b))]>;
- def i64ri :
- NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, i32imm:$b),
- !strconcat(OpcStr, "64 \t$dst, $a, $b;"),
- [(set Int64Regs:$dst, (OpNode Int64Regs:$a, (i32 imm:$b)))]>;
- def i32rr :
- NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),
- !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
- [(set Int32Regs:$dst, (OpNode Int32Regs:$a, Int32Regs:$b))]>;
- def i32ri :
- NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
- !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
- [(set Int32Regs:$dst, (OpNode Int32Regs:$a, (i32 imm:$b)))]>;
- def i32ii :
- NVPTXInst<(outs Int32Regs:$dst), (ins i32imm:$a, i32imm:$b),
- !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
- [(set Int32Regs:$dst, (OpNode (i32 imm:$a), (i32 imm:$b)))]>;
- def i16rr :
- NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, Int32Regs:$b),
- !strconcat(OpcStr, "16 \t$dst, $a, $b;"),
- [(set Int16Regs:$dst, (OpNode Int16Regs:$a, Int32Regs:$b))]>;
- def i16ri :
- NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, i32imm:$b),
- !strconcat(OpcStr, "16 \t$dst, $a, $b;"),
- [(set Int16Regs:$dst, (OpNode Int16Regs:$a, (i32 imm:$b)))]>;
-}
-
-defm SHL : SHIFT<"shl.b", shl>;
-defm SRA : SHIFT<"shr.s", sra>;
-defm SRL : SHIFT<"shr.u", srl>;
-
-// Bit-reverse
-def BREV32 :
- NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a),
- "brev.b32 \t$dst, $a;",
- [(set Int32Regs:$dst, (bitreverse Int32Regs:$a))]>;
-def BREV64 :
- NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a),
- "brev.b64 \t$dst, $a;",
- [(set Int64Regs:$dst, (bitreverse Int64Regs:$a))]>;
-
-//
-// Rotate: Use ptx shf instruction if available.
-//
-
-// 32 bit r2 = rotl r1, n
-// =>
-// r2 = shf.l r1, r1, n
-def ROTL32imm_hw :
- NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src, i32imm:$amt),
- "shf.l.wrap.b32 \t$dst, $src, $src, $amt;",
- [(set Int32Regs:$dst, (rotl Int32Regs:$src, (i32 imm:$amt)))]>,
- Requires<[hasHWROT32]>;
-
-def ROTL32reg_hw :
- NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src, Int32Regs:$amt),
- "shf.l.wrap.b32 \t$dst, $src, $src, $amt;",
- [(set Int32Regs:$dst, (rotl Int32Regs:$src, Int32Regs:$amt))]>,
- Requires<[hasHWROT32]>;
-
-// 32 bit r2 = rotr r1, n
-// =>
-// r2 = shf.r r1, r1, n
-def ROTR32imm_hw :
- NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src, i32imm:$amt),
- "shf.r.wrap.b32 \t$dst, $src, $src, $amt;",
- [(set Int32Regs:$dst, (rotr Int32Regs:$src, (i32 imm:$amt)))]>,
- Requires<[hasHWROT32]>;
-
-def ROTR32reg_hw :
- NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src, Int32Regs:$amt),
- "shf.r.wrap.b32 \t$dst, $src, $src, $amt;",
- [(set Int32Regs:$dst, (rotr Int32Regs:$src, Int32Regs:$amt))]>,
- Requires<[hasHWROT32]>;
-
-// 32-bit software rotate by immediate. $amt2 should equal 32 - $amt1.
-def ROT32imm_sw :
- NVPTXInst<(outs Int32Regs:$dst),
- (ins Int32Regs:$src, i32imm:$amt1, i32imm:$amt2),
- "{{\n\t"
- ".reg .b32 %lhs;\n\t"
- ".reg .b32 %rhs;\n\t"
- "shl.b32 \t%lhs, $src, $amt1;\n\t"
- "shr.b32 \t%rhs, $src, $amt2;\n\t"
- "add.u32 \t$dst, %lhs, %rhs;\n\t"
- "}}",
- []>;
-
-def SUB_FRM_32 : SDNodeXForm<imm, [{
- return CurDAG->getTargetConstant(32 - N->getZExtValue(), SDLoc(N), MVT::i32);
-}]>;
-
-def : Pat<(rotl Int32Regs:$src, (i32 imm:$amt)),
- (ROT32imm_sw Int32Regs:$src, imm:$amt, (SUB_FRM_32 node:$amt))>,
- Requires<[noHWROT32]>;
-def : Pat<(rotr Int32Regs:$src, (i32 imm:$amt)),
- (ROT32imm_sw Int32Regs:$src, (SUB_FRM_32 node:$amt), imm:$amt)>,
- Requires<[noHWROT32]>;
-
-// 32-bit software rotate left by register.
-def ROTL32reg_sw :
- NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src, Int32Regs:$amt),
- "{{\n\t"
- ".reg .b32 %lhs;\n\t"
- ".reg .b32 %rhs;\n\t"
- ".reg .b32 %amt2;\n\t"
- "shl.b32 \t%lhs, $src, $amt;\n\t"
- "sub.s32 \t%amt2, 32, $amt;\n\t"
- "shr.b32 \t%rhs, $src, %amt2;\n\t"
- "add.u32 \t$dst, %lhs, %rhs;\n\t"
- "}}",
- [(set Int32Regs:$dst, (rotl Int32Regs:$src, Int32Regs:$amt))]>,
- Requires<[noHWROT32]>;
-
-// 32-bit software rotate right by register.
-def ROTR32reg_sw :
- NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src, Int32Regs:$amt),
- "{{\n\t"
- ".reg .b32 %lhs;\n\t"
- ".reg .b32 %rhs;\n\t"
- ".reg .b32 %amt2;\n\t"
- "shr.b32 \t%lhs, $src, $amt;\n\t"
- "sub.s32 \t%amt2, 32, $amt;\n\t"
- "shl.b32 \t%rhs, $src, %amt2;\n\t"
- "add.u32 \t$dst, %lhs, %rhs;\n\t"
- "}}",
- [(set Int32Regs:$dst, (rotr Int32Regs:$src, Int32Regs:$amt))]>,
- Requires<[noHWROT32]>;
-
-// 64-bit software rotate by immediate. $amt2 should equal 64 - $amt1.
-def ROT64imm_sw :
- NVPTXInst<(outs Int64Regs:$dst),
- (ins Int64Regs:$src, i32imm:$amt1, i32imm:$amt2),
- "{{\n\t"
- ".reg .b64 %lhs;\n\t"
- ".reg .b64 %rhs;\n\t"
- "shl.b64 \t%lhs, $src, $amt1;\n\t"
- "shr.b64 \t%rhs, $src, $amt2;\n\t"
- "add.u64 \t$dst, %lhs, %rhs;\n\t"
- "}}",
- []>;
-
-def SUB_FRM_64 : SDNodeXForm<imm, [{
- return CurDAG->getTargetConstant(64-N->getZExtValue(), SDLoc(N), MVT::i32);
-}]>;
-
-def : Pat<(rotl Int64Regs:$src, (i32 imm:$amt)),
- (ROT64imm_sw Int64Regs:$src, imm:$amt, (SUB_FRM_64 node:$amt))>;
-def : Pat<(rotr Int64Regs:$src, (i32 imm:$amt)),
- (ROT64imm_sw Int64Regs:$src, (SUB_FRM_64 node:$amt), imm:$amt)>;
-
-// 64-bit software rotate left by register.
-def ROTL64reg_sw :
- NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src, Int32Regs:$amt),
- "{{\n\t"
- ".reg .b64 %lhs;\n\t"
- ".reg .b64 %rhs;\n\t"
- ".reg .u32 %amt2;\n\t"
- "shl.b64 \t%lhs, $src, $amt;\n\t"
- "sub.u32 \t%amt2, 64, $amt;\n\t"
- "shr.b64 \t%rhs, $src, %amt2;\n\t"
- "add.u64 \t$dst, %lhs, %rhs;\n\t"
- "}}",
- [(set Int64Regs:$dst, (rotl Int64Regs:$src, Int32Regs:$amt))]>;
-
-def ROTR64reg_sw :
- NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src, Int32Regs:$amt),
- "{{\n\t"
- ".reg .b64 %lhs;\n\t"
- ".reg .b64 %rhs;\n\t"
- ".reg .u32 %amt2;\n\t"
- "shr.b64 \t%lhs, $src, $amt;\n\t"
- "sub.u32 \t%amt2, 64, $amt;\n\t"
- "shl.b64 \t%rhs, $src, %amt2;\n\t"
- "add.u64 \t$dst, %lhs, %rhs;\n\t"
- "}}",
- [(set Int64Regs:$dst, (rotr Int64Regs:$src, Int32Regs:$amt))]>;
-
-//
-// Funnnel shift in clamp mode
-//
-
-// Create SDNodes so they can be used in the DAG code, e.g.
-// NVPTXISelLowering (LowerShiftLeftParts and LowerShiftRightParts)
-def SDTIntShiftDOp :
- SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>,
- SDTCisInt<0>, SDTCisInt<3>]>;
-def FUN_SHFL_CLAMP : SDNode<"NVPTXISD::FUN_SHFL_CLAMP", SDTIntShiftDOp, []>;
-def FUN_SHFR_CLAMP : SDNode<"NVPTXISD::FUN_SHFR_CLAMP", SDTIntShiftDOp, []>;
-
-def FUNSHFLCLAMP :
- NVPTXInst<(outs Int32Regs:$dst),
- (ins Int32Regs:$lo, Int32Regs:$hi, Int32Regs:$amt),
- "shf.l.clamp.b32 \t$dst, $lo, $hi, $amt;",
- [(set Int32Regs:$dst,
- (FUN_SHFL_CLAMP Int32Regs:$lo, Int32Regs:$hi, Int32Regs:$amt))]>;
-
-def FUNSHFRCLAMP :
- NVPTXInst<(outs Int32Regs:$dst),
- (ins Int32Regs:$lo, Int32Regs:$hi, Int32Regs:$amt),
- "shf.r.clamp.b32 \t$dst, $lo, $hi, $amt;",
- [(set Int32Regs:$dst,
- (FUN_SHFR_CLAMP Int32Regs:$lo, Int32Regs:$hi, Int32Regs:$amt))]>;
-
-//
-// BFE - bit-field extract
-//
-
-// Template for BFE instructions. Takes four args,
-// [dest (reg), src (reg), start (reg or imm), end (reg or imm)].
-// Start may be an imm only if end is also an imm. FIXME: Is this a
-// restriction in PTX?
-//
-// dest and src may be int32 or int64, but start and end are always int32.
-multiclass BFE<string TyStr, RegisterClass RC> {
- def rrr
- : NVPTXInst<(outs RC:$d),
- (ins RC:$a, Int32Regs:$b, Int32Regs:$c),
- !strconcat("bfe.", TyStr, " \t$d, $a, $b, $c;"), []>;
- def rri
- : NVPTXInst<(outs RC:$d),
- (ins RC:$a, Int32Regs:$b, i32imm:$c),
- !strconcat("bfe.", TyStr, " \t$d, $a, $b, $c;"), []>;
- def rii
- : NVPTXInst<(outs RC:$d),
- (ins RC:$a, i32imm:$b, i32imm:$c),
- !strconcat("bfe.", TyStr, " \t$d, $a, $b, $c;"), []>;
-}
-
-let hasSideEffects = 0 in {
- defm BFE_S32 : BFE<"s32", Int32Regs>;
- defm BFE_U32 : BFE<"u32", Int32Regs>;
- defm BFE_S64 : BFE<"s64", Int64Regs>;
- defm BFE_U64 : BFE<"u64", Int64Regs>;
-}
-
-//-----------------------------------
-// Comparison instructions (setp, set)
-//-----------------------------------
-
-// FIXME: This doesn't cover versions of set and setp that combine with a
-// boolean predicate, e.g. setp.eq.and.b16.
-
-let hasSideEffects = 0 in {
- multiclass SETP<string TypeStr, RegisterClass RC, Operand ImmCls> {
- def rr :
- NVPTXInst<(outs Int1Regs:$dst), (ins RC:$a, RC:$b, CmpMode:$cmp),
- !strconcat("setp${cmp:base}${cmp:ftz}.", TypeStr,
- " \t$dst, $a, $b;"), []>;
- def ri :
- NVPTXInst<(outs Int1Regs:$dst), (ins RC:$a, ImmCls:$b, CmpMode:$cmp),
- !strconcat("setp${cmp:base}${cmp:ftz}.", TypeStr,
- " \t$dst, $a, $b;"), []>;
- def ir :
- NVPTXInst<(outs Int1Regs:$dst), (ins ImmCls:$a, RC:$b, CmpMode:$cmp),
- !strconcat("setp${cmp:base}${cmp:ftz}.", TypeStr,
- " \t$dst, $a, $b;"), []>;
- }
-}
-
-defm SETP_b16 : SETP<"b16", Int16Regs, i16imm>;
-defm SETP_s16 : SETP<"s16", Int16Regs, i16imm>;
-defm SETP_u16 : SETP<"u16", Int16Regs, i16imm>;
-defm SETP_b32 : SETP<"b32", Int32Regs, i32imm>;
-defm SETP_s32 : SETP<"s32", Int32Regs, i32imm>;
-defm SETP_u32 : SETP<"u32", Int32Regs, i32imm>;
-defm SETP_b64 : SETP<"b64", Int64Regs, i64imm>;
-defm SETP_s64 : SETP<"s64", Int64Regs, i64imm>;
-defm SETP_u64 : SETP<"u64", Int64Regs, i64imm>;
-defm SETP_f32 : SETP<"f32", Float32Regs, f32imm>;
-defm SETP_f64 : SETP<"f64", Float64Regs, f64imm>;
-def SETP_f16rr :
- NVPTXInst<(outs Int1Regs:$dst),
- (ins Float16Regs:$a, Float16Regs:$b, CmpMode:$cmp),
- "setp${cmp:base}${cmp:ftz}.f16 \t$dst, $a, $b;",
- []>, Requires<[useFP16Math]>;
-
-def SETP_f16x2rr :
- NVPTXInst<(outs Int1Regs:$p, Int1Regs:$q),
- (ins Float16x2Regs:$a, Float16x2Regs:$b, CmpMode:$cmp),
- "setp${cmp:base}${cmp:ftz}.f16x2 \t$p|$q, $a, $b;",
- []>,
- Requires<[useFP16Math]>;
-
-
-// FIXME: This doesn't appear to be correct. The "set" mnemonic has the form
-// "set.CmpOp{.ftz}.dtype.stype", where dtype is the type of the destination
-// reg, either u32, s32, or f32. Anyway these aren't used at the moment.
-
-let hasSideEffects = 0 in {
- multiclass SET<string TypeStr, RegisterClass RC, Operand ImmCls> {
- def rr : NVPTXInst<(outs Int32Regs:$dst),
- (ins RC:$a, RC:$b, CmpMode:$cmp),
- !strconcat("set$cmp.", TypeStr, " \t$dst, $a, $b;"), []>;
- def ri : NVPTXInst<(outs Int32Regs:$dst),
- (ins RC:$a, ImmCls:$b, CmpMode:$cmp),
- !strconcat("set$cmp.", TypeStr, " \t$dst, $a, $b;"), []>;
- def ir : NVPTXInst<(outs Int32Regs:$dst),
- (ins ImmCls:$a, RC:$b, CmpMode:$cmp),
- !strconcat("set$cmp.", TypeStr, " \t$dst, $a, $b;"), []>;
- }
-}
-
-defm SET_b16 : SET<"b16", Int16Regs, i16imm>;
-defm SET_s16 : SET<"s16", Int16Regs, i16imm>;
-defm SET_u16 : SET<"u16", Int16Regs, i16imm>;
-defm SET_b32 : SET<"b32", Int32Regs, i32imm>;
-defm SET_s32 : SET<"s32", Int32Regs, i32imm>;
-defm SET_u32 : SET<"u32", Int32Regs, i32imm>;
-defm SET_b64 : SET<"b64", Int64Regs, i64imm>;
-defm SET_s64 : SET<"s64", Int64Regs, i64imm>;
-defm SET_u64 : SET<"u64", Int64Regs, i64imm>;
-defm SET_f16 : SET<"f16", Float16Regs, f16imm>;
-defm SET_f32 : SET<"f32", Float32Regs, f32imm>;
-defm SET_f64 : SET<"f64", Float64Regs, f64imm>;
-
-//-----------------------------------
-// Selection instructions (selp)
-//-----------------------------------
-
-// FIXME: Missing slct
-
-// selp instructions that don't have any pattern matches; we explicitly use
-// them within this file.
-let hasSideEffects = 0 in {
- multiclass SELP<string TypeStr, RegisterClass RC, Operand ImmCls> {
- def rr : NVPTXInst<(outs RC:$dst),
- (ins RC:$a, RC:$b, Int1Regs:$p),
- !strconcat("selp.", TypeStr, " \t$dst, $a, $b, $p;"), []>;
- def ri : NVPTXInst<(outs RC:$dst),
- (ins RC:$a, ImmCls:$b, Int1Regs:$p),
- !strconcat("selp.", TypeStr, " \t$dst, $a, $b, $p;"), []>;
- def ir : NVPTXInst<(outs RC:$dst),
- (ins ImmCls:$a, RC:$b, Int1Regs:$p),
- !strconcat("selp.", TypeStr, " \t$dst, $a, $b, $p;"), []>;
- def ii : NVPTXInst<(outs RC:$dst),
- (ins ImmCls:$a, ImmCls:$b, Int1Regs:$p),
- !strconcat("selp.", TypeStr, " \t$dst, $a, $b, $p;"), []>;
- }
-
- multiclass SELP_PATTERN<string TypeStr, RegisterClass RC, Operand ImmCls,
- SDNode ImmNode> {
- def rr :
- NVPTXInst<(outs RC:$dst),
- (ins RC:$a, RC:$b, Int1Regs:$p),
- !strconcat("selp.", TypeStr, " \t$dst, $a, $b, $p;"),
- [(set RC:$dst, (select Int1Regs:$p, RC:$a, RC:$b))]>;
- def ri :
- NVPTXInst<(outs RC:$dst),
- (ins RC:$a, ImmCls:$b, Int1Regs:$p),
- !strconcat("selp.", TypeStr, " \t$dst, $a, $b, $p;"),
- [(set RC:$dst, (select Int1Regs:$p, RC:$a, ImmNode:$b))]>;
- def ir :
- NVPTXInst<(outs RC:$dst),
- (ins ImmCls:$a, RC:$b, Int1Regs:$p),
- !strconcat("selp.", TypeStr, " \t$dst, $a, $b, $p;"),
- [(set RC:$dst, (select Int1Regs:$p, ImmNode:$a, RC:$b))]>;
- def ii :
- NVPTXInst<(outs RC:$dst),
- (ins ImmCls:$a, ImmCls:$b, Int1Regs:$p),
- !strconcat("selp.", TypeStr, " \t$dst, $a, $b, $p;"),
- [(set RC:$dst, (select Int1Regs:$p, ImmNode:$a, ImmNode:$b))]>;
- }
-}
-
-// Don't pattern match on selp.{s,u}{16,32,64} -- selp.b{16,32,64} is just as
-// good.
-defm SELP_b16 : SELP_PATTERN<"b16", Int16Regs, i16imm, imm>;
-defm SELP_s16 : SELP<"s16", Int16Regs, i16imm>;
-defm SELP_u16 : SELP<"u16", Int16Regs, i16imm>;
-defm SELP_b32 : SELP_PATTERN<"b32", Int32Regs, i32imm, imm>;
-defm SELP_s32 : SELP<"s32", Int32Regs, i32imm>;
-defm SELP_u32 : SELP<"u32", Int32Regs, i32imm>;
-defm SELP_b64 : SELP_PATTERN<"b64", Int64Regs, i64imm, imm>;
-defm SELP_s64 : SELP<"s64", Int64Regs, i64imm>;
-defm SELP_u64 : SELP<"u64", Int64Regs, i64imm>;
-defm SELP_f16 : SELP_PATTERN<"b16", Float16Regs, f16imm, fpimm>;
-defm SELP_f32 : SELP_PATTERN<"f32", Float32Regs, f32imm, fpimm>;
-defm SELP_f64 : SELP_PATTERN<"f64", Float64Regs, f64imm, fpimm>;
-
-def SELP_f16x2rr :
- NVPTXInst<(outs Float16x2Regs:$dst),
- (ins Float16x2Regs:$a, Float16x2Regs:$b, Int1Regs:$p),
- "selp.b32 \t$dst, $a, $b, $p;",
- [(set Float16x2Regs:$dst,
- (select Int1Regs:$p, Float16x2Regs:$a, Float16x2Regs:$b))]>;
-
-//-----------------------------------
-// Data Movement (Load / Store, Move)
-//-----------------------------------
-
-def ADDRri : ComplexPattern<i32, 2, "SelectADDRri", [frameindex],
- [SDNPWantRoot]>;
-def ADDRri64 : ComplexPattern<i64, 2, "SelectADDRri64", [frameindex],
- [SDNPWantRoot]>;
-
-def MEMri : Operand<i32> {
- let PrintMethod = "printMemOperand";
- let MIOperandInfo = (ops Int32Regs, i32imm);
-}
-def MEMri64 : Operand<i64> {
- let PrintMethod = "printMemOperand";
- let MIOperandInfo = (ops Int64Regs, i64imm);
-}
-
-def imem : Operand<iPTR> {
- let PrintMethod = "printOperand";
-}
-
-def imemAny : Operand<iPTRAny> {
- let PrintMethod = "printOperand";
-}
-
-def LdStCode : Operand<i32> {
- let PrintMethod = "printLdStCode";
-}
-
-def SDTWrapper : SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>, SDTCisPtrTy<0>]>;
-def Wrapper : SDNode<"NVPTXISD::Wrapper", SDTWrapper>;
-
-// Load a memory address into a u32 or u64 register.
-def MOV_ADDR : NVPTXInst<(outs Int32Regs:$dst), (ins imem:$a),
- "mov.u32 \t$dst, $a;",
- [(set Int32Regs:$dst, (Wrapper tglobaladdr:$a))]>;
-def MOV_ADDR64 : NVPTXInst<(outs Int64Regs:$dst), (ins imem:$a),
- "mov.u64 \t$dst, $a;",
- [(set Int64Regs:$dst, (Wrapper tglobaladdr:$a))]>;
-
-// Get pointer to local stack.
-let hasSideEffects = 0 in {
- def MOV_DEPOT_ADDR : NVPTXInst<(outs Int32Regs:$d), (ins i32imm:$num),
- "mov.u32 \t$d, __local_depot$num;", []>;
- def MOV_DEPOT_ADDR_64 : NVPTXInst<(outs Int64Regs:$d), (ins i32imm:$num),
- "mov.u64 \t$d, __local_depot$num;", []>;
-}
-
-
-// copyPhysreg is hard-coded in NVPTXInstrInfo.cpp
-let IsSimpleMove=1, hasSideEffects=0 in {
- def IMOV1rr : NVPTXInst<(outs Int1Regs:$dst), (ins Int1Regs:$sss),
- "mov.pred \t$dst, $sss;", []>;
- def IMOV16rr : NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$sss),
- "mov.u16 \t$dst, $sss;", []>;
- def IMOV32rr : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$sss),
- "mov.u32 \t$dst, $sss;", []>;
- def IMOV64rr : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$sss),
- "mov.u64 \t$dst, $sss;", []>;
-
- def FMOV16rr : NVPTXInst<(outs Float16Regs:$dst), (ins Float16Regs:$src),
- // We have to use .b16 here as there's no mov.f16.
- "mov.b16 \t$dst, $src;", []>;
- def FMOV32rr : NVPTXInst<(outs Float32Regs:$dst), (ins Float32Regs:$src),
- "mov.f32 \t$dst, $src;", []>;
- def FMOV64rr : NVPTXInst<(outs Float64Regs:$dst), (ins Float64Regs:$src),
- "mov.f64 \t$dst, $src;", []>;
-}
-
-def IMOV1ri : NVPTXInst<(outs Int1Regs:$dst), (ins i1imm:$src),
- "mov.pred \t$dst, $src;",
- [(set Int1Regs:$dst, imm:$src)]>;
-def IMOV16ri : NVPTXInst<(outs Int16Regs:$dst), (ins i16imm:$src),
- "mov.u16 \t$dst, $src;",
- [(set Int16Regs:$dst, imm:$src)]>;
-def IMOV32ri : NVPTXInst<(outs Int32Regs:$dst), (ins i32imm:$src),
- "mov.u32 \t$dst, $src;",
- [(set Int32Regs:$dst, imm:$src)]>;
-def IMOV64i : NVPTXInst<(outs Int64Regs:$dst), (ins i64imm:$src),
- "mov.u64 \t$dst, $src;",
- [(set Int64Regs:$dst, imm:$src)]>;
-
-def FMOV32ri : NVPTXInst<(outs Float32Regs:$dst), (ins f32imm:$src),
- "mov.f32 \t$dst, $src;",
- [(set Float32Regs:$dst, fpimm:$src)]>;
-def FMOV64ri : NVPTXInst<(outs Float64Regs:$dst), (ins f64imm:$src),
- "mov.f64 \t$dst, $src;",
- [(set Float64Regs:$dst, fpimm:$src)]>;
-
-def : Pat<(i32 (Wrapper texternalsym:$dst)), (IMOV32ri texternalsym:$dst)>;
-
-//---- Copy Frame Index ----
-def LEA_ADDRi : NVPTXInst<(outs Int32Regs:$dst), (ins MEMri:$addr),
- "add.u32 \t$dst, ${addr:add};",
- [(set Int32Regs:$dst, ADDRri:$addr)]>;
-def LEA_ADDRi64 : NVPTXInst<(outs Int64Regs:$dst), (ins MEMri64:$addr),
- "add.u64 \t$dst, ${addr:add};",
- [(set Int64Regs:$dst, ADDRri64:$addr)]>;
-
-//-----------------------------------
-// Comparison and Selection
-//-----------------------------------
-
-multiclass ISET_FORMAT<PatFrag OpNode, PatLeaf Mode,
- Instruction setp_16rr,
- Instruction setp_16ri,
- Instruction setp_16ir,
- Instruction setp_32rr,
- Instruction setp_32ri,
- Instruction setp_32ir,
- Instruction setp_64rr,
- Instruction setp_64ri,
- Instruction setp_64ir,
- Instruction set_16rr,
- Instruction set_16ri,
- Instruction set_16ir,
- Instruction set_32rr,
- Instruction set_32ri,
- Instruction set_32ir,
- Instruction set_64rr,
- Instruction set_64ri,
- Instruction set_64ir> {
- // i16 -> pred
- def : Pat<(i1 (OpNode Int16Regs:$a, Int16Regs:$b)),
- (setp_16rr Int16Regs:$a, Int16Regs:$b, Mode)>;
- def : Pat<(i1 (OpNode Int16Regs:$a, imm:$b)),
- (setp_16ri Int16Regs:$a, imm:$b, Mode)>;
- def : Pat<(i1 (OpNode imm:$a, Int16Regs:$b)),
- (setp_16ir imm:$a, Int16Regs:$b, Mode)>;
- // i32 -> pred
- def : Pat<(i1 (OpNode Int32Regs:$a, Int32Regs:$b)),
- (setp_32rr Int32Regs:$a, Int32Regs:$b, Mode)>;
- def : Pat<(i1 (OpNode Int32Regs:$a, imm:$b)),
- (setp_32ri Int32Regs:$a, imm:$b, Mode)>;
- def : Pat<(i1 (OpNode imm:$a, Int32Regs:$b)),
- (setp_32ir imm:$a, Int32Regs:$b, Mode)>;
- // i64 -> pred
- def : Pat<(i1 (OpNode Int64Regs:$a, Int64Regs:$b)),
- (setp_64rr Int64Regs:$a, Int64Regs:$b, Mode)>;
- def : Pat<(i1 (OpNode Int64Regs:$a, imm:$b)),
- (setp_64ri Int64Regs:$a, imm:$b, Mode)>;
- def : Pat<(i1 (OpNode imm:$a, Int64Regs:$b)),
- (setp_64ir imm:$a, Int64Regs:$b, Mode)>;
-
- // i16 -> i32
- def : Pat<(i32 (OpNode Int16Regs:$a, Int16Regs:$b)),
- (set_16rr Int16Regs:$a, Int16Regs:$b, Mode)>;
- def : Pat<(i32 (OpNode Int16Regs:$a, imm:$b)),
- (set_16ri Int16Regs:$a, imm:$b, Mode)>;
- def : Pat<(i32 (OpNode imm:$a, Int16Regs:$b)),
- (set_16ir imm:$a, Int16Regs:$b, Mode)>;
- // i32 -> i32
- def : Pat<(i32 (OpNode Int32Regs:$a, Int32Regs:$b)),
- (set_32rr Int32Regs:$a, Int32Regs:$b, Mode)>;
- def : Pat<(i32 (OpNode Int32Regs:$a, imm:$b)),
- (set_32ri Int32Regs:$a, imm:$b, Mode)>;
- def : Pat<(i32 (OpNode imm:$a, Int32Regs:$b)),
- (set_32ir imm:$a, Int32Regs:$b, Mode)>;
- // i64 -> i32
- def : Pat<(i32 (OpNode Int64Regs:$a, Int64Regs:$b)),
- (set_64rr Int64Regs:$a, Int64Regs:$b, Mode)>;
- def : Pat<(i32 (OpNode Int64Regs:$a, imm:$b)),
- (set_64ri Int64Regs:$a, imm:$b, Mode)>;
- def : Pat<(i32 (OpNode imm:$a, Int64Regs:$b)),
- (set_64ir imm:$a, Int64Regs:$b, Mode)>;
-}
-
-multiclass ISET_FORMAT_SIGNED<PatFrag OpNode, PatLeaf Mode>
- : ISET_FORMAT<OpNode, Mode,
- SETP_s16rr, SETP_s16ri, SETP_s16ir,
- SETP_s32rr, SETP_s32ri, SETP_s32ir,
- SETP_s64rr, SETP_s64ri, SETP_s64ir,
- SET_s16rr, SET_s16ri, SET_s16ir,
- SET_s32rr, SET_s32ri, SET_s32ir,
- SET_s64rr, SET_s64ri, SET_s64ir> {
- // TableGen doesn't like empty multiclasses.
- def : PatLeaf<(i32 0)>;
-}
-
-multiclass ISET_FORMAT_UNSIGNED<PatFrag OpNode, PatLeaf Mode>
- : ISET_FORMAT<OpNode, Mode,
- SETP_u16rr, SETP_u16ri, SETP_u16ir,
- SETP_u32rr, SETP_u32ri, SETP_u32ir,
- SETP_u64rr, SETP_u64ri, SETP_u64ir,
- SET_u16rr, SET_u16ri, SET_u16ir,
- SET_u32rr, SET_u32ri, SET_u32ir,
- SET_u64rr, SET_u64ri, SET_u64ir> {
- // TableGen doesn't like empty multiclasses.
- def : PatLeaf<(i32 0)>;
-}
-
-defm : ISET_FORMAT_SIGNED<setgt, CmpGT>;
-defm : ISET_FORMAT_SIGNED<setlt, CmpLT>;
-defm : ISET_FORMAT_SIGNED<setge, CmpGE>;
-defm : ISET_FORMAT_SIGNED<setle, CmpLE>;
-defm : ISET_FORMAT_SIGNED<seteq, CmpEQ>;
-defm : ISET_FORMAT_SIGNED<setne, CmpNE>;
-defm : ISET_FORMAT_UNSIGNED<setugt, CmpGT>;
-defm : ISET_FORMAT_UNSIGNED<setult, CmpLT>;
-defm : ISET_FORMAT_UNSIGNED<setuge, CmpGE>;
-defm : ISET_FORMAT_UNSIGNED<setule, CmpLE>;
-defm : ISET_FORMAT_UNSIGNED<setueq, CmpEQ>;
-defm : ISET_FORMAT_UNSIGNED<setune, CmpNE>;
-
-// i1 compares
-def : Pat<(setne Int1Regs:$a, Int1Regs:$b),
- (XORb1rr Int1Regs:$a, Int1Regs:$b)>;
-def : Pat<(setune Int1Regs:$a, Int1Regs:$b),
- (XORb1rr Int1Regs:$a, Int1Regs:$b)>;
-
-def : Pat<(seteq Int1Regs:$a, Int1Regs:$b),
- (NOT1 (XORb1rr Int1Regs:$a, Int1Regs:$b))>;
-def : Pat<(setueq Int1Regs:$a, Int1Regs:$b),
- (NOT1 (XORb1rr Int1Regs:$a, Int1Regs:$b))>;
-
-// i1 compare -> i32
-def : Pat<(i32 (setne Int1Regs:$a, Int1Regs:$b)),
- (SELP_u32ii -1, 0, (XORb1rr Int1Regs:$a, Int1Regs:$b))>;
-def : Pat<(i32 (setne Int1Regs:$a, Int1Regs:$b)),
- (SELP_u32ii 0, -1, (XORb1rr Int1Regs:$a, Int1Regs:$b))>;
-
-
-
-multiclass FSET_FORMAT<PatFrag OpNode, PatLeaf Mode, PatLeaf ModeFTZ> {
- // f16 -> pred
- def : Pat<(i1 (OpNode Float16Regs:$a, Float16Regs:$b)),
- (SETP_f16rr Float16Regs:$a, Float16Regs:$b, ModeFTZ)>,
- Requires<[useFP16Math,doF32FTZ]>;
- def : Pat<(i1 (OpNode Float16Regs:$a, Float16Regs:$b)),
- (SETP_f16rr Float16Regs:$a, Float16Regs:$b, Mode)>,
- Requires<[useFP16Math]>;
- def : Pat<(i1 (OpNode Float16Regs:$a, fpimm:$b)),
- (SETP_f16rr Float16Regs:$a, (LOAD_CONST_F16 fpimm:$b), ModeFTZ)>,
- Requires<[useFP16Math,doF32FTZ]>;
- def : Pat<(i1 (OpNode Float16Regs:$a, fpimm:$b)),
- (SETP_f16rr Float16Regs:$a, (LOAD_CONST_F16 fpimm:$b), Mode)>,
- Requires<[useFP16Math]>;
- def : Pat<(i1 (OpNode fpimm:$a, Float16Regs:$b)),
- (SETP_f16rr (LOAD_CONST_F16 fpimm:$a), Float16Regs:$b, ModeFTZ)>,
- Requires<[useFP16Math,doF32FTZ]>;
- def : Pat<(i1 (OpNode fpimm:$a, Float16Regs:$b)),
- (SETP_f16rr (LOAD_CONST_F16 fpimm:$a), Float16Regs:$b, Mode)>,
- Requires<[useFP16Math]>;
-
- // f32 -> pred
- def : Pat<(i1 (OpNode Float32Regs:$a, Float32Regs:$b)),
- (SETP_f32rr Float32Regs:$a, Float32Regs:$b, ModeFTZ)>,
- Requires<[doF32FTZ]>;
- def : Pat<(i1 (OpNode Float32Regs:$a, Float32Regs:$b)),
- (SETP_f32rr Float32Regs:$a, Float32Regs:$b, Mode)>;
- def : Pat<(i1 (OpNode Float32Regs:$a, fpimm:$b)),
- (SETP_f32ri Float32Regs:$a, fpimm:$b, ModeFTZ)>,
- Requires<[doF32FTZ]>;
- def : Pat<(i1 (OpNode Float32Regs:$a, fpimm:$b)),
- (SETP_f32ri Float32Regs:$a, fpimm:$b, Mode)>;
- def : Pat<(i1 (OpNode fpimm:$a, Float32Regs:$b)),
- (SETP_f32ir fpimm:$a, Float32Regs:$b, ModeFTZ)>,
- Requires<[doF32FTZ]>;
- def : Pat<(i1 (OpNode fpimm:$a, Float32Regs:$b)),
- (SETP_f32ir fpimm:$a, Float32Regs:$b, Mode)>;
-
- // f64 -> pred
- def : Pat<(i1 (OpNode Float64Regs:$a, Float64Regs:$b)),
- (SETP_f64rr Float64Regs:$a, Float64Regs:$b, Mode)>;
- def : Pat<(i1 (OpNode Float64Regs:$a, fpimm:$b)),
- (SETP_f64ri Float64Regs:$a, fpimm:$b, Mode)>;
- def : Pat<(i1 (OpNode fpimm:$a, Float64Regs:$b)),
- (SETP_f64ir fpimm:$a, Float64Regs:$b, Mode)>;
-
- // f16 -> i32
- def : Pat<(i32 (OpNode Float16Regs:$a, Float16Regs:$b)),
- (SET_f16rr Float16Regs:$a, Float16Regs:$b, ModeFTZ)>,
- Requires<[useFP16Math, doF32FTZ]>;
- def : Pat<(i32 (OpNode Float16Regs:$a, Float16Regs:$b)),
- (SET_f16rr Float16Regs:$a, Float16Regs:$b, Mode)>,
- Requires<[useFP16Math]>;
- def : Pat<(i32 (OpNode Float16Regs:$a, fpimm:$b)),
- (SET_f16rr Float16Regs:$a, (LOAD_CONST_F16 fpimm:$b), ModeFTZ)>,
- Requires<[useFP16Math, doF32FTZ]>;
- def : Pat<(i32 (OpNode Float16Regs:$a, fpimm:$b)),
- (SET_f16rr Float16Regs:$a, (LOAD_CONST_F16 fpimm:$b), Mode)>,
- Requires<[useFP16Math]>;
- def : Pat<(i32 (OpNode fpimm:$a, Float16Regs:$b)),
- (SET_f16ir (LOAD_CONST_F16 fpimm:$a), Float16Regs:$b, ModeFTZ)>,
- Requires<[useFP16Math, doF32FTZ]>;
- def : Pat<(i32 (OpNode fpimm:$a, Float16Regs:$b)),
- (SET_f16ir (LOAD_CONST_F16 fpimm:$a), Float16Regs:$b, Mode)>,
- Requires<[useFP16Math]>;
-
- // f32 -> i32
- def : Pat<(i32 (OpNode Float32Regs:$a, Float32Regs:$b)),
- (SET_f32rr Float32Regs:$a, Float32Regs:$b, ModeFTZ)>,
- Requires<[doF32FTZ]>;
- def : Pat<(i32 (OpNode Float32Regs:$a, Float32Regs:$b)),
- (SET_f32rr Float32Regs:$a, Float32Regs:$b, Mode)>;
- def : Pat<(i32 (OpNode Float32Regs:$a, fpimm:$b)),
- (SET_f32ri Float32Regs:$a, fpimm:$b, ModeFTZ)>,
- Requires<[doF32FTZ]>;
- def : Pat<(i32 (OpNode Float32Regs:$a, fpimm:$b)),
- (SET_f32ri Float32Regs:$a, fpimm:$b, Mode)>;
- def : Pat<(i32 (OpNode fpimm:$a, Float32Regs:$b)),
- (SET_f32ir fpimm:$a, Float32Regs:$b, ModeFTZ)>,
- Requires<[doF32FTZ]>;
- def : Pat<(i32 (OpNode fpimm:$a, Float32Regs:$b)),
- (SET_f32ir fpimm:$a, Float32Regs:$b, Mode)>;
-
- // f64 -> i32
- def : Pat<(i32 (OpNode Float64Regs:$a, Float64Regs:$b)),
- (SET_f64rr Float64Regs:$a, Float64Regs:$b, Mode)>;
- def : Pat<(i32 (OpNode Float64Regs:$a, fpimm:$b)),
- (SET_f64ri Float64Regs:$a, fpimm:$b, Mode)>;
- def : Pat<(i32 (OpNode fpimm:$a, Float64Regs:$b)),
- (SET_f64ir fpimm:$a, Float64Regs:$b, Mode)>;
-}
-
-defm FSetOGT : FSET_FORMAT<setogt, CmpGT, CmpGT_FTZ>;
-defm FSetOLT : FSET_FORMAT<setolt, CmpLT, CmpLT_FTZ>;
-defm FSetOGE : FSET_FORMAT<setoge, CmpGE, CmpGE_FTZ>;
-defm FSetOLE : FSET_FORMAT<setole, CmpLE, CmpLE_FTZ>;
-defm FSetOEQ : FSET_FORMAT<setoeq, CmpEQ, CmpEQ_FTZ>;
-defm FSetONE : FSET_FORMAT<setone, CmpNE, CmpNE_FTZ>;
-
-defm FSetUGT : FSET_FORMAT<setugt, CmpGTU, CmpGTU_FTZ>;
-defm FSetULT : FSET_FORMAT<setult, CmpLTU, CmpLTU_FTZ>;
-defm FSetUGE : FSET_FORMAT<setuge, CmpGEU, CmpGEU_FTZ>;
-defm FSetULE : FSET_FORMAT<setule, CmpLEU, CmpLEU_FTZ>;
-defm FSetUEQ : FSET_FORMAT<setueq, CmpEQU, CmpEQU_FTZ>;
-defm FSetUNE : FSET_FORMAT<setune, CmpNEU, CmpNEU_FTZ>;
-
-defm FSetGT : FSET_FORMAT<setgt, CmpGT, CmpGT_FTZ>;
-defm FSetLT : FSET_FORMAT<setlt, CmpLT, CmpLT_FTZ>;
-defm FSetGE : FSET_FORMAT<setge, CmpGE, CmpGE_FTZ>;
-defm FSetLE : FSET_FORMAT<setle, CmpLE, CmpLE_FTZ>;
-defm FSetEQ : FSET_FORMAT<seteq, CmpEQ, CmpEQ_FTZ>;
-defm FSetNE : FSET_FORMAT<setne, CmpNE, CmpNE_FTZ>;
-
-defm FSetNUM : FSET_FORMAT<seto, CmpNUM, CmpNUM_FTZ>;
-defm FSetNAN : FSET_FORMAT<setuo, CmpNAN, CmpNAN_FTZ>;
-
-// FIXME: What is this doing here? Can it be deleted?
-// def ld_param : SDNode<"NVPTXISD::LOAD_PARAM", SDTLoad,
-// [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
-
-def SDTDeclareParamProfile :
- SDTypeProfile<0, 3, [SDTCisInt<0>, SDTCisInt<1>, SDTCisInt<2>]>;
-def SDTDeclareScalarParamProfile :
- SDTypeProfile<0, 3, [SDTCisInt<0>, SDTCisInt<1>, SDTCisInt<2>]>;
-def SDTLoadParamProfile : SDTypeProfile<1, 2, [SDTCisInt<1>, SDTCisInt<2>]>;
-def SDTLoadParamV2Profile : SDTypeProfile<2, 2, [SDTCisSameAs<0, 1>, SDTCisInt<2>, SDTCisInt<3>]>;
-def SDTLoadParamV4Profile : SDTypeProfile<4, 2, [SDTCisInt<4>, SDTCisInt<5>]>;
-def SDTPrintCallProfile : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
-def SDTPrintCallUniProfile : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
-def SDTStoreParamProfile : SDTypeProfile<0, 3, [SDTCisInt<0>, SDTCisInt<1>]>;
-def SDTStoreParamV2Profile : SDTypeProfile<0, 4, [SDTCisInt<0>, SDTCisInt<1>]>;
-def SDTStoreParamV4Profile : SDTypeProfile<0, 6, [SDTCisInt<0>, SDTCisInt<1>]>;
-def SDTStoreParam32Profile : SDTypeProfile<0, 3, [SDTCisInt<0>, SDTCisInt<1>]>;
-def SDTCallArgProfile : SDTypeProfile<0, 2, [SDTCisInt<0>]>;
-def SDTCallArgMarkProfile : SDTypeProfile<0, 0, []>;
-def SDTCallVoidProfile : SDTypeProfile<0, 1, []>;
-def SDTCallValProfile : SDTypeProfile<1, 0, []>;
-def SDTMoveParamProfile : SDTypeProfile<1, 1, []>;
-def SDTStoreRetvalProfile : SDTypeProfile<0, 2, [SDTCisInt<0>]>;
-def SDTStoreRetvalV2Profile : SDTypeProfile<0, 3, [SDTCisInt<0>]>;
-def SDTStoreRetvalV4Profile : SDTypeProfile<0, 5, [SDTCisInt<0>]>;
-def SDTPseudoUseParamProfile : SDTypeProfile<0, 1, []>;
-
-def DeclareParam :
- SDNode<"NVPTXISD::DeclareParam", SDTDeclareParamProfile,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def DeclareScalarParam :
- SDNode<"NVPTXISD::DeclareScalarParam", SDTDeclareScalarParamProfile,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def DeclareRetParam :
- SDNode<"NVPTXISD::DeclareRetParam", SDTDeclareParamProfile,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def DeclareRet :
- SDNode<"NVPTXISD::DeclareRet", SDTDeclareScalarParamProfile,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def LoadParam :
- SDNode<"NVPTXISD::LoadParam", SDTLoadParamProfile,
- [SDNPHasChain, SDNPMayLoad, SDNPOutGlue, SDNPInGlue]>;
-def LoadParamV2 :
- SDNode<"NVPTXISD::LoadParamV2", SDTLoadParamV2Profile,
- [SDNPHasChain, SDNPMayLoad, SDNPOutGlue, SDNPInGlue]>;
-def LoadParamV4 :
- SDNode<"NVPTXISD::LoadParamV4", SDTLoadParamV4Profile,
- [SDNPHasChain, SDNPMayLoad, SDNPOutGlue, SDNPInGlue]>;
-def PrintCall :
- SDNode<"NVPTXISD::PrintCall", SDTPrintCallProfile,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def PrintConvergentCall :
- SDNode<"NVPTXISD::PrintConvergentCall", SDTPrintCallProfile,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def PrintCallUni :
- SDNode<"NVPTXISD::PrintCallUni", SDTPrintCallUniProfile,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def PrintConvergentCallUni :
- SDNode<"NVPTXISD::PrintConvergentCallUni", SDTPrintCallUniProfile,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def StoreParam :
- SDNode<"NVPTXISD::StoreParam", SDTStoreParamProfile,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def StoreParamV2 :
- SDNode<"NVPTXISD::StoreParamV2", SDTStoreParamV2Profile,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def StoreParamV4 :
- SDNode<"NVPTXISD::StoreParamV4", SDTStoreParamV4Profile,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def StoreParamU32 :
- SDNode<"NVPTXISD::StoreParamU32", SDTStoreParam32Profile,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def StoreParamS32 :
- SDNode<"NVPTXISD::StoreParamS32", SDTStoreParam32Profile,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def CallArgBegin :
- SDNode<"NVPTXISD::CallArgBegin", SDTCallArgMarkProfile,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def CallArg :
- SDNode<"NVPTXISD::CallArg", SDTCallArgProfile,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def LastCallArg :
- SDNode<"NVPTXISD::LastCallArg", SDTCallArgProfile,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def CallArgEnd :
- SDNode<"NVPTXISD::CallArgEnd", SDTCallVoidProfile,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def CallVoid :
- SDNode<"NVPTXISD::CallVoid", SDTCallVoidProfile,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def Prototype :
- SDNode<"NVPTXISD::Prototype", SDTCallVoidProfile,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def CallVal :
- SDNode<"NVPTXISD::CallVal", SDTCallValProfile,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def MoveParam :
- SDNode<"NVPTXISD::MoveParam", SDTMoveParamProfile, []>;
-def StoreRetval :
- SDNode<"NVPTXISD::StoreRetval", SDTStoreRetvalProfile,
- [SDNPHasChain, SDNPSideEffect]>;
-def StoreRetvalV2 :
- SDNode<"NVPTXISD::StoreRetvalV2", SDTStoreRetvalV2Profile,
- [SDNPHasChain, SDNPSideEffect]>;
-def StoreRetvalV4 :
- SDNode<"NVPTXISD::StoreRetvalV4", SDTStoreRetvalV4Profile,
- [SDNPHasChain, SDNPSideEffect]>;
-def PseudoUseParam :
- SDNode<"NVPTXISD::PseudoUseParam", SDTPseudoUseParamProfile,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def RETURNNode :
- SDNode<"NVPTXISD::RETURN", SDTCallArgMarkProfile,
- [SDNPHasChain, SDNPSideEffect]>;
-
-let mayLoad = 1 in {
- class LoadParamMemInst<NVPTXRegClass regclass, string opstr> :
- NVPTXInst<(outs regclass:$dst), (ins i32imm:$b),
- !strconcat("ld.param", opstr, " \t$dst, [retval0+$b];"),
- []>;
-
- class LoadParamV2MemInst<NVPTXRegClass regclass, string opstr> :
- NVPTXInst<(outs regclass:$dst, regclass:$dst2), (ins i32imm:$b),
- !strconcat("ld.param.v2", opstr,
- " \t{{$dst, $dst2}}, [retval0+$b];"), []>;
-
- class LoadParamV4MemInst<NVPTXRegClass regclass, string opstr> :
- NVPTXInst<(outs regclass:$dst, regclass:$dst2, regclass:$dst3,
- regclass:$dst4),
- (ins i32imm:$b),
- !strconcat("ld.param.v4", opstr,
- " \t{{$dst, $dst2, $dst3, $dst4}}, [retval0+$b];"),
- []>;
-}
-
-class LoadParamRegInst<NVPTXRegClass regclass, string opstr> :
- NVPTXInst<(outs regclass:$dst), (ins i32imm:$b),
- !strconcat("mov", opstr, " \t$dst, retval$b;"),
- [(set regclass:$dst, (LoadParam (i32 0), (i32 imm:$b)))]>;
-
-let mayStore = 1 in {
- class StoreParamInst<NVPTXRegClass regclass, string opstr> :
- NVPTXInst<(outs), (ins regclass:$val, i32imm:$a, i32imm:$b),
- !strconcat("st.param", opstr, " \t[param$a+$b], $val;"),
- []>;
-
- class StoreParamV2Inst<NVPTXRegClass regclass, string opstr> :
- NVPTXInst<(outs), (ins regclass:$val, regclass:$val2,
- i32imm:$a, i32imm:$b),
- !strconcat("st.param.v2", opstr,
- " \t[param$a+$b], {{$val, $val2}};"),
- []>;
-
- class StoreParamV4Inst<NVPTXRegClass regclass, string opstr> :
- NVPTXInst<(outs), (ins regclass:$val, regclass:$val2, regclass:$val3,
- regclass:$val4, i32imm:$a,
- i32imm:$b),
- !strconcat("st.param.v4", opstr,
- " \t[param$a+$b], {{$val, $val2, $val3, $val4}};"),
- []>;
-
- class StoreRetvalInst<NVPTXRegClass regclass, string opstr> :
- NVPTXInst<(outs), (ins regclass:$val, i32imm:$a),
- !strconcat("st.param", opstr, " \t[func_retval0+$a], $val;"),
- []>;
-
- class StoreRetvalV2Inst<NVPTXRegClass regclass, string opstr> :
- NVPTXInst<(outs), (ins regclass:$val, regclass:$val2, i32imm:$a),
- !strconcat("st.param.v2", opstr,
- " \t[func_retval0+$a], {{$val, $val2}};"),
- []>;
-
- class StoreRetvalV4Inst<NVPTXRegClass regclass, string opstr> :
- NVPTXInst<(outs),
- (ins regclass:$val, regclass:$val2, regclass:$val3,
- regclass:$val4, i32imm:$a),
- !strconcat("st.param.v4", opstr,
- " \t[func_retval0+$a], {{$val, $val2, $val3, $val4}};"),
- []>;
-}
-
-let isCall=1 in {
- multiclass CALL<string OpcStr, SDNode OpNode> {
- def PrintCallNoRetInst : NVPTXInst<(outs), (ins),
- !strconcat(OpcStr, " "), [(OpNode (i32 0))]>;
- def PrintCallRetInst1 : NVPTXInst<(outs), (ins),
- !strconcat(OpcStr, " (retval0), "), [(OpNode (i32 1))]>;
- def PrintCallRetInst2 : NVPTXInst<(outs), (ins),
- !strconcat(OpcStr, " (retval0, retval1), "), [(OpNode (i32 2))]>;
- def PrintCallRetInst3 : NVPTXInst<(outs), (ins),
- !strconcat(OpcStr, " (retval0, retval1, retval2), "), [(OpNode (i32 3))]>;
- def PrintCallRetInst4 : NVPTXInst<(outs), (ins),
- !strconcat(OpcStr, " (retval0, retval1, retval2, retval3), "),
- [(OpNode (i32 4))]>;
- def PrintCallRetInst5 : NVPTXInst<(outs), (ins),
- !strconcat(OpcStr, " (retval0, retval1, retval2, retval3, retval4), "),
- [(OpNode (i32 5))]>;
- def PrintCallRetInst6 : NVPTXInst<(outs), (ins),
- !strconcat(OpcStr, " (retval0, retval1, retval2, retval3, retval4, "
- "retval5), "),
- [(OpNode (i32 6))]>;
- def PrintCallRetInst7 : NVPTXInst<(outs), (ins),
- !strconcat(OpcStr, " (retval0, retval1, retval2, retval3, retval4, "
- "retval5, retval6), "),
- [(OpNode (i32 7))]>;
- def PrintCallRetInst8 : NVPTXInst<(outs), (ins),
- !strconcat(OpcStr, " (retval0, retval1, retval2, retval3, retval4, "
- "retval5, retval6, retval7), "),
- [(OpNode (i32 8))]>;
- }
-}
-
-defm Call : CALL<"call", PrintCall>;
-defm CallUni : CALL<"call.uni", PrintCallUni>;
-
-// Convergent call instructions. These are identical to regular calls, except
-// they have the isConvergent bit set.
-let isConvergent=1 in {
- defm ConvergentCall : CALL<"call", PrintConvergentCall>;
- defm ConvergentCallUni : CALL<"call.uni", PrintConvergentCallUni>;
-}
-
-def LoadParamMemI64 : LoadParamMemInst<Int64Regs, ".b64">;
-def LoadParamMemI32 : LoadParamMemInst<Int32Regs, ".b32">;
-def LoadParamMemI16 : LoadParamMemInst<Int16Regs, ".b16">;
-def LoadParamMemI8 : LoadParamMemInst<Int16Regs, ".b8">;
-def LoadParamMemV2I64 : LoadParamV2MemInst<Int64Regs, ".b64">;
-def LoadParamMemV2I32 : LoadParamV2MemInst<Int32Regs, ".b32">;
-def LoadParamMemV2I16 : LoadParamV2MemInst<Int16Regs, ".b16">;
-def LoadParamMemV2I8 : LoadParamV2MemInst<Int16Regs, ".b8">;
-def LoadParamMemV4I32 : LoadParamV4MemInst<Int32Regs, ".b32">;
-def LoadParamMemV4I16 : LoadParamV4MemInst<Int16Regs, ".b16">;
-def LoadParamMemV4I8 : LoadParamV4MemInst<Int16Regs, ".b8">;
-def LoadParamMemF16 : LoadParamMemInst<Float16Regs, ".b16">;
-def LoadParamMemF16x2 : LoadParamMemInst<Float16x2Regs, ".b32">;
-def LoadParamMemF32 : LoadParamMemInst<Float32Regs, ".f32">;
-def LoadParamMemF64 : LoadParamMemInst<Float64Regs, ".f64">;
-def LoadParamMemV2F16 : LoadParamV2MemInst<Float16Regs, ".b16">;
-def LoadParamMemV2F16x2: LoadParamV2MemInst<Float16x2Regs, ".b32">;
-def LoadParamMemV2F32 : LoadParamV2MemInst<Float32Regs, ".f32">;
-def LoadParamMemV2F64 : LoadParamV2MemInst<Float64Regs, ".f64">;
-def LoadParamMemV4F16 : LoadParamV4MemInst<Float16Regs, ".b16">;
-def LoadParamMemV4F16x2: LoadParamV4MemInst<Float16x2Regs, ".b32">;
-def LoadParamMemV4F32 : LoadParamV4MemInst<Float32Regs, ".f32">;
-
-def StoreParamI64 : StoreParamInst<Int64Regs, ".b64">;
-def StoreParamI32 : StoreParamInst<Int32Regs, ".b32">;
-
-def StoreParamI16 : StoreParamInst<Int16Regs, ".b16">;
-def StoreParamI8 : StoreParamInst<Int16Regs, ".b8">;
-def StoreParamV2I64 : StoreParamV2Inst<Int64Regs, ".b64">;
-def StoreParamV2I32 : StoreParamV2Inst<Int32Regs, ".b32">;
-def StoreParamV2I16 : StoreParamV2Inst<Int16Regs, ".b16">;
-def StoreParamV2I8 : StoreParamV2Inst<Int16Regs, ".b8">;
-
-def StoreParamV4I32 : StoreParamV4Inst<Int32Regs, ".b32">;
-def StoreParamV4I16 : StoreParamV4Inst<Int16Regs, ".b16">;
-def StoreParamV4I8 : StoreParamV4Inst<Int16Regs, ".b8">;
-
-def StoreParamF16 : StoreParamInst<Float16Regs, ".b16">;
-def StoreParamF16x2 : StoreParamInst<Float16x2Regs, ".b32">;
-def StoreParamF32 : StoreParamInst<Float32Regs, ".f32">;
-def StoreParamF64 : StoreParamInst<Float64Regs, ".f64">;
-def StoreParamV2F16 : StoreParamV2Inst<Float16Regs, ".b16">;
-def StoreParamV2F16x2 : StoreParamV2Inst<Float16x2Regs, ".b32">;
-def StoreParamV2F32 : StoreParamV2Inst<Float32Regs, ".f32">;
-def StoreParamV2F64 : StoreParamV2Inst<Float64Regs, ".f64">;
-def StoreParamV4F16 : StoreParamV4Inst<Float16Regs, ".b16">;
-def StoreParamV4F16x2 : StoreParamV4Inst<Float16x2Regs, ".b32">;
-def StoreParamV4F32 : StoreParamV4Inst<Float32Regs, ".f32">;
-
-def StoreRetvalI64 : StoreRetvalInst<Int64Regs, ".b64">;
-def StoreRetvalI32 : StoreRetvalInst<Int32Regs, ".b32">;
-def StoreRetvalI16 : StoreRetvalInst<Int16Regs, ".b16">;
-def StoreRetvalI8 : StoreRetvalInst<Int16Regs, ".b8">;
-def StoreRetvalV2I64 : StoreRetvalV2Inst<Int64Regs, ".b64">;
-def StoreRetvalV2I32 : StoreRetvalV2Inst<Int32Regs, ".b32">;
-def StoreRetvalV2I16 : StoreRetvalV2Inst<Int16Regs, ".b16">;
-def StoreRetvalV2I8 : StoreRetvalV2Inst<Int16Regs, ".b8">;
-def StoreRetvalV4I32 : StoreRetvalV4Inst<Int32Regs, ".b32">;
-def StoreRetvalV4I16 : StoreRetvalV4Inst<Int16Regs, ".b16">;
-def StoreRetvalV4I8 : StoreRetvalV4Inst<Int16Regs, ".b8">;
-
-def StoreRetvalF64 : StoreRetvalInst<Float64Regs, ".f64">;
-def StoreRetvalF32 : StoreRetvalInst<Float32Regs, ".f32">;
-def StoreRetvalF16 : StoreRetvalInst<Float16Regs, ".b16">;
-def StoreRetvalF16x2 : StoreRetvalInst<Float16x2Regs, ".b32">;
-def StoreRetvalV2F64 : StoreRetvalV2Inst<Float64Regs, ".f64">;
-def StoreRetvalV2F32 : StoreRetvalV2Inst<Float32Regs, ".f32">;
-def StoreRetvalV2F16 : StoreRetvalV2Inst<Float16Regs, ".b16">;
-def StoreRetvalV2F16x2: StoreRetvalV2Inst<Float16x2Regs, ".b32">;
-def StoreRetvalV4F32 : StoreRetvalV4Inst<Float32Regs, ".f32">;
-def StoreRetvalV4F16 : StoreRetvalV4Inst<Float16Regs, ".b16">;
-def StoreRetvalV4F16x2: StoreRetvalV4Inst<Float16x2Regs, ".b32">;
-
-def CallArgBeginInst : NVPTXInst<(outs), (ins), "(", [(CallArgBegin)]>;
-def CallArgEndInst1 : NVPTXInst<(outs), (ins), ");", [(CallArgEnd (i32 1))]>;
-def CallArgEndInst0 : NVPTXInst<(outs), (ins), ")", [(CallArgEnd (i32 0))]>;
-def RETURNInst : NVPTXInst<(outs), (ins), "ret;", [(RETURNNode)]>;
-
-class CallArgInst<NVPTXRegClass regclass> :
- NVPTXInst<(outs), (ins regclass:$a), "$a, ",
- [(CallArg (i32 0), regclass:$a)]>;
-
-class LastCallArgInst<NVPTXRegClass regclass> :
- NVPTXInst<(outs), (ins regclass:$a), "$a",
- [(LastCallArg (i32 0), regclass:$a)]>;
-
-def CallArgI64 : CallArgInst<Int64Regs>;
-def CallArgI32 : CallArgInst<Int32Regs>;
-def CallArgI16 : CallArgInst<Int16Regs>;
-def CallArgF64 : CallArgInst<Float64Regs>;
-def CallArgF32 : CallArgInst<Float32Regs>;
-
-def LastCallArgI64 : LastCallArgInst<Int64Regs>;
-def LastCallArgI32 : LastCallArgInst<Int32Regs>;
-def LastCallArgI16 : LastCallArgInst<Int16Regs>;
-def LastCallArgF64 : LastCallArgInst<Float64Regs>;
-def LastCallArgF32 : LastCallArgInst<Float32Regs>;
-
-def CallArgI32imm : NVPTXInst<(outs), (ins i32imm:$a), "$a, ",
- [(CallArg (i32 0), (i32 imm:$a))]>;
-def LastCallArgI32imm : NVPTXInst<(outs), (ins i32imm:$a), "$a",
- [(LastCallArg (i32 0), (i32 imm:$a))]>;
-
-def CallArgParam : NVPTXInst<(outs), (ins i32imm:$a), "param$a, ",
- [(CallArg (i32 1), (i32 imm:$a))]>;
-def LastCallArgParam : NVPTXInst<(outs), (ins i32imm:$a), "param$a",
- [(LastCallArg (i32 1), (i32 imm:$a))]>;
-
-def CallVoidInst : NVPTXInst<(outs), (ins imem:$addr), "$addr, ",
- [(CallVoid (Wrapper tglobaladdr:$addr))]>;
-def CallVoidInstReg : NVPTXInst<(outs), (ins Int32Regs:$addr), "$addr, ",
- [(CallVoid Int32Regs:$addr)]>;
-def CallVoidInstReg64 : NVPTXInst<(outs), (ins Int64Regs:$addr), "$addr, ",
- [(CallVoid Int64Regs:$addr)]>;
-def PrototypeInst : NVPTXInst<(outs), (ins i32imm:$val), ", prototype_$val;",
- [(Prototype (i32 imm:$val))]>;
-
-def DeclareRetMemInst :
- NVPTXInst<(outs), (ins i32imm:$align, i32imm:$size, i32imm:$num),
- ".param .align $align .b8 retval$num[$size];",
- [(DeclareRetParam (i32 imm:$align), (i32 imm:$size), (i32 imm:$num))]>;
-def DeclareRetScalarInst :
- NVPTXInst<(outs), (ins i32imm:$size, i32imm:$num),
- ".param .b$size retval$num;",
- [(DeclareRet (i32 1), (i32 imm:$size), (i32 imm:$num))]>;
-def DeclareRetRegInst :
- NVPTXInst<(outs), (ins i32imm:$size, i32imm:$num),
- ".reg .b$size retval$num;",
- [(DeclareRet (i32 2), (i32 imm:$size), (i32 imm:$num))]>;
-
-def DeclareParamInst :
- NVPTXInst<(outs), (ins i32imm:$align, i32imm:$a, i32imm:$size),
- ".param .align $align .b8 param$a[$size];",
- [(DeclareParam (i32 imm:$align), (i32 imm:$a), (i32 imm:$size))]>;
-def DeclareScalarParamInst :
- NVPTXInst<(outs), (ins i32imm:$a, i32imm:$size),
- ".param .b$size param$a;",
- [(DeclareScalarParam (i32 imm:$a), (i32 imm:$size), (i32 0))]>;
-def DeclareScalarRegInst :
- NVPTXInst<(outs), (ins i32imm:$a, i32imm:$size),
- ".reg .b$size param$a;",
- [(DeclareScalarParam (i32 imm:$a), (i32 imm:$size), (i32 1))]>;
-
-class MoveParamInst<NVPTXRegClass regclass, string asmstr> :
- NVPTXInst<(outs regclass:$dst), (ins regclass:$src),
- !strconcat("mov", asmstr, " \t$dst, $src;"),
- [(set regclass:$dst, (MoveParam regclass:$src))]>;
-
-def MoveParamI64 : MoveParamInst<Int64Regs, ".b64">;
-def MoveParamI32 : MoveParamInst<Int32Regs, ".b32">;
-def MoveParamI16 :
- NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$src),
- "cvt.u16.u32 \t$dst, $src;",
- [(set Int16Regs:$dst, (MoveParam Int16Regs:$src))]>;
-def MoveParamF64 : MoveParamInst<Float64Regs, ".f64">;
-def MoveParamF32 : MoveParamInst<Float32Regs, ".f32">;
-def MoveParamF16 : MoveParamInst<Float16Regs, ".f16">;
-
-class PseudoUseParamInst<NVPTXRegClass regclass> :
- NVPTXInst<(outs), (ins regclass:$src),
- "// Pseudo use of $src",
- [(PseudoUseParam regclass:$src)]>;
-
-def PseudoUseParamI64 : PseudoUseParamInst<Int64Regs>;
-def PseudoUseParamI32 : PseudoUseParamInst<Int32Regs>;
-def PseudoUseParamI16 : PseudoUseParamInst<Int16Regs>;
-def PseudoUseParamF64 : PseudoUseParamInst<Float64Regs>;
-def PseudoUseParamF32 : PseudoUseParamInst<Float32Regs>;
-
-
-//
-// Load / Store Handling
-//
-multiclass LD<NVPTXRegClass regclass> {
- def _avar : NVPTXInst<
- (outs regclass:$dst),
- (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, imem:$addr),
- "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t$dst, [$addr];", []>;
- def _areg : NVPTXInst<
- (outs regclass:$dst),
- (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, Int32Regs:$addr),
- "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t$dst, [$addr];", []>;
- def _areg_64 : NVPTXInst<
- (outs regclass:$dst),
- (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, Int64Regs:$addr),
- "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t$dst, [$addr];", []>;
- def _ari : NVPTXInst<
- (outs regclass:$dst),
- (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, Int32Regs:$addr, i32imm:$offset),
- "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t$dst, [$addr+$offset];", []>;
- def _ari_64 : NVPTXInst<
- (outs regclass:$dst),
- (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec,
- LdStCode:$Sign, i32imm:$fromWidth, Int64Regs:$addr, i32imm:$offset),
- "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t$dst, [$addr+$offset];", []>;
- def _asi : NVPTXInst<
- (outs regclass:$dst),
- (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec,
- LdStCode:$Sign, i32imm:$fromWidth, imem:$addr, i32imm:$offset),
- "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t$dst, [$addr+$offset];", []>;
-}
-
-let mayLoad=1, hasSideEffects=0 in {
- defm LD_i8 : LD<Int16Regs>;
- defm LD_i16 : LD<Int16Regs>;
- defm LD_i32 : LD<Int32Regs>;
- defm LD_i64 : LD<Int64Regs>;
- defm LD_f16 : LD<Float16Regs>;
- defm LD_f16x2 : LD<Float16x2Regs>;
- defm LD_f32 : LD<Float32Regs>;
- defm LD_f64 : LD<Float64Regs>;
-}
-
-multiclass ST<NVPTXRegClass regclass> {
- def _avar : NVPTXInst<
- (outs),
- (ins regclass:$src, LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec,
- LdStCode:$Sign, i32imm:$toWidth, imem:$addr),
- "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth"
- " \t[$addr], $src;", []>;
- def _areg : NVPTXInst<
- (outs),
- (ins regclass:$src, LdStCode:$isVol, LdStCode:$addsp,
- LdStCode:$Vec, LdStCode:$Sign, i32imm:$toWidth, Int32Regs:$addr),
- "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth"
- " \t[$addr], $src;", []>;
- def _areg_64 : NVPTXInst<
- (outs),
- (ins regclass:$src, LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec,
- LdStCode:$Sign, i32imm:$toWidth, Int64Regs:$addr),
- "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth"
- " \t[$addr], $src;", []>;
- def _ari : NVPTXInst<
- (outs),
- (ins regclass:$src, LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec,
- LdStCode:$Sign, i32imm:$toWidth, Int32Regs:$addr, i32imm:$offset),
- "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth"
- " \t[$addr+$offset], $src;", []>;
- def _ari_64 : NVPTXInst<
- (outs),
- (ins regclass:$src, LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec,
- LdStCode:$Sign, i32imm:$toWidth, Int64Regs:$addr, i32imm:$offset),
- "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth"
- " \t[$addr+$offset], $src;", []>;
- def _asi : NVPTXInst<
- (outs),
- (ins regclass:$src, LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec,
- LdStCode:$Sign, i32imm:$toWidth, imem:$addr, i32imm:$offset),
- "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth"
- " \t[$addr+$offset], $src;", []>;
-}
-
-let mayStore=1, hasSideEffects=0 in {
- defm ST_i8 : ST<Int16Regs>;
- defm ST_i16 : ST<Int16Regs>;
- defm ST_i32 : ST<Int32Regs>;
- defm ST_i64 : ST<Int64Regs>;
- defm ST_f16 : ST<Float16Regs>;
- defm ST_f16x2 : ST<Float16x2Regs>;
- defm ST_f32 : ST<Float32Regs>;
- defm ST_f64 : ST<Float64Regs>;
-}
-
-// The following is used only in and after vector elementizations. Vector
-// elementization happens at the machine instruction level, so the following
-// instructions never appear in the DAG.
-multiclass LD_VEC<NVPTXRegClass regclass> {
- def _v2_avar : NVPTXInst<
- (outs regclass:$dst1, regclass:$dst2),
- (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, imem:$addr),
- "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t{{$dst1, $dst2}}, [$addr];", []>;
- def _v2_areg : NVPTXInst<
- (outs regclass:$dst1, regclass:$dst2),
- (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, Int32Regs:$addr),
- "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t{{$dst1, $dst2}}, [$addr];", []>;
- def _v2_areg_64 : NVPTXInst<
- (outs regclass:$dst1, regclass:$dst2),
- (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, Int64Regs:$addr),
- "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t{{$dst1, $dst2}}, [$addr];", []>;
- def _v2_ari : NVPTXInst<
- (outs regclass:$dst1, regclass:$dst2),
- (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, Int32Regs:$addr, i32imm:$offset),
- "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t{{$dst1, $dst2}}, [$addr+$offset];", []>;
- def _v2_ari_64 : NVPTXInst<
- (outs regclass:$dst1, regclass:$dst2),
- (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, Int64Regs:$addr, i32imm:$offset),
- "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t{{$dst1, $dst2}}, [$addr+$offset];", []>;
- def _v2_asi : NVPTXInst<
- (outs regclass:$dst1, regclass:$dst2),
- (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, imem:$addr, i32imm:$offset),
- "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t{{$dst1, $dst2}}, [$addr+$offset];", []>;
- def _v4_avar : NVPTXInst<
- (outs regclass:$dst1, regclass:$dst2, regclass:$dst3, regclass:$dst4),
- (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, imem:$addr),
- "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t{{$dst1, $dst2, $dst3, $dst4}}, [$addr];", []>;
- def _v4_areg : NVPTXInst<
- (outs regclass:$dst1, regclass:$dst2, regclass:$dst3, regclass:$dst4),
- (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, Int32Regs:$addr),
- "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t{{$dst1, $dst2, $dst3, $dst4}}, [$addr];", []>;
- def _v4_areg_64 : NVPTXInst<
- (outs regclass:$dst1, regclass:$dst2, regclass:$dst3, regclass:$dst4),
- (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, Int64Regs:$addr),
- "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t{{$dst1, $dst2, $dst3, $dst4}}, [$addr];", []>;
- def _v4_ari : NVPTXInst<
- (outs regclass:$dst1, regclass:$dst2, regclass:$dst3, regclass:$dst4),
- (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, Int32Regs:$addr, i32imm:$offset),
- "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t{{$dst1, $dst2, $dst3, $dst4}}, [$addr+$offset];", []>;
- def _v4_ari_64 : NVPTXInst<
- (outs regclass:$dst1, regclass:$dst2, regclass:$dst3, regclass:$dst4),
- (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, Int64Regs:$addr, i32imm:$offset),
- "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t{{$dst1, $dst2, $dst3, $dst4}}, [$addr+$offset];", []>;
- def _v4_asi : NVPTXInst<
- (outs regclass:$dst1, regclass:$dst2, regclass:$dst3, regclass:$dst4),
- (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, imem:$addr, i32imm:$offset),
- "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t{{$dst1, $dst2, $dst3, $dst4}}, [$addr+$offset];", []>;
-}
-let mayLoad=1, hasSideEffects=0 in {
- defm LDV_i8 : LD_VEC<Int16Regs>;
- defm LDV_i16 : LD_VEC<Int16Regs>;
- defm LDV_i32 : LD_VEC<Int32Regs>;
- defm LDV_i64 : LD_VEC<Int64Regs>;
- defm LDV_f16 : LD_VEC<Float16Regs>;
- defm LDV_f16x2 : LD_VEC<Float16x2Regs>;
- defm LDV_f32 : LD_VEC<Float32Regs>;
- defm LDV_f64 : LD_VEC<Float64Regs>;
-}
-
-multiclass ST_VEC<NVPTXRegClass regclass> {
- def _v2_avar : NVPTXInst<
- (outs),
- (ins regclass:$src1, regclass:$src2, LdStCode:$isVol, LdStCode:$addsp,
- LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth, imem:$addr),
- "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t[$addr], {{$src1, $src2}};", []>;
- def _v2_areg : NVPTXInst<
- (outs),
- (ins regclass:$src1, regclass:$src2, LdStCode:$isVol, LdStCode:$addsp,
- LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth, Int32Regs:$addr),
- "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t[$addr], {{$src1, $src2}};", []>;
- def _v2_areg_64 : NVPTXInst<
- (outs),
- (ins regclass:$src1, regclass:$src2, LdStCode:$isVol, LdStCode:$addsp,
- LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth, Int64Regs:$addr),
- "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t[$addr], {{$src1, $src2}};", []>;
- def _v2_ari : NVPTXInst<
- (outs),
- (ins regclass:$src1, regclass:$src2, LdStCode:$isVol, LdStCode:$addsp,
- LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth, Int32Regs:$addr,
- i32imm:$offset),
- "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t[$addr+$offset], {{$src1, $src2}};", []>;
- def _v2_ari_64 : NVPTXInst<
- (outs),
- (ins regclass:$src1, regclass:$src2, LdStCode:$isVol, LdStCode:$addsp,
- LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth, Int64Regs:$addr,
- i32imm:$offset),
- "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t[$addr+$offset], {{$src1, $src2}};", []>;
- def _v2_asi : NVPTXInst<
- (outs),
- (ins regclass:$src1, regclass:$src2, LdStCode:$isVol, LdStCode:$addsp,
- LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth, imem:$addr,
- i32imm:$offset),
- "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t[$addr+$offset], {{$src1, $src2}};", []>;
- def _v4_avar : NVPTXInst<
- (outs),
- (ins regclass:$src1, regclass:$src2, regclass:$src3, regclass:$src4,
- LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, imem:$addr),
- "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t[$addr], {{$src1, $src2, $src3, $src4}};", []>;
- def _v4_areg : NVPTXInst<
- (outs),
- (ins regclass:$src1, regclass:$src2, regclass:$src3, regclass:$src4,
- LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, Int32Regs:$addr),
- "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t[$addr], {{$src1, $src2, $src3, $src4}};", []>;
- def _v4_areg_64 : NVPTXInst<
- (outs),
- (ins regclass:$src1, regclass:$src2, regclass:$src3, regclass:$src4,
- LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, Int64Regs:$addr),
- "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t[$addr], {{$src1, $src2, $src3, $src4}};", []>;
- def _v4_ari : NVPTXInst<
- (outs),
- (ins regclass:$src1, regclass:$src2, regclass:$src3, regclass:$src4,
- LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, Int32Regs:$addr, i32imm:$offset),
- "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t[$addr+$offset], {{$src1, $src2, $src3, $src4}};", []>;
- def _v4_ari_64 : NVPTXInst<
- (outs),
- (ins regclass:$src1, regclass:$src2, regclass:$src3, regclass:$src4,
- LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, Int64Regs:$addr, i32imm:$offset),
- "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t[$addr+$offset], {{$src1, $src2, $src3, $src4}};", []>;
- def _v4_asi : NVPTXInst<
- (outs),
- (ins regclass:$src1, regclass:$src2, regclass:$src3, regclass:$src4,
- LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, imem:$addr, i32imm:$offset),
- "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}"
- "$fromWidth \t[$addr+$offset], {{$src1, $src2, $src3, $src4}};", []>;
-}
-
-let mayStore=1, hasSideEffects=0 in {
- defm STV_i8 : ST_VEC<Int16Regs>;
- defm STV_i16 : ST_VEC<Int16Regs>;
- defm STV_i32 : ST_VEC<Int32Regs>;
- defm STV_i64 : ST_VEC<Int64Regs>;
- defm STV_f16 : ST_VEC<Float16Regs>;
- defm STV_f16x2 : ST_VEC<Float16x2Regs>;
- defm STV_f32 : ST_VEC<Float32Regs>;
- defm STV_f64 : ST_VEC<Float64Regs>;
-}
-
-//---- Conversion ----
-
-class F_BITCONVERT<string SzStr, NVPTXRegClass regclassIn,
- NVPTXRegClass regclassOut> :
- NVPTXInst<(outs regclassOut:$d), (ins regclassIn:$a),
- !strconcat("mov.b", !strconcat(SzStr, " \t$d, $a;")),
- [(set regclassOut:$d, (bitconvert regclassIn:$a))]>;
-
-def BITCONVERT_16_I2F : F_BITCONVERT<"16", Int16Regs, Float16Regs>;
-def BITCONVERT_16_F2I : F_BITCONVERT<"16", Float16Regs, Int16Regs>;
-def BITCONVERT_32_I2F : F_BITCONVERT<"32", Int32Regs, Float32Regs>;
-def BITCONVERT_32_F2I : F_BITCONVERT<"32", Float32Regs, Int32Regs>;
-def BITCONVERT_64_I2F : F_BITCONVERT<"64", Int64Regs, Float64Regs>;
-def BITCONVERT_64_F2I : F_BITCONVERT<"64", Float64Regs, Int64Regs>;
-def BITCONVERT_32_I2F16x2 : F_BITCONVERT<"32", Int32Regs, Float16x2Regs>;
-def BITCONVERT_32_F16x22I : F_BITCONVERT<"32", Float16x2Regs, Int32Regs>;
-
-// NOTE: pred->fp are currently sub-optimal due to an issue in TableGen where
-// we cannot specify floating-point literals in isel patterns. Therefore, we
-// use an integer selp to select either 1 or 0 and then cvt to floating-point.
-
-// sint -> f16
-def : Pat<(f16 (sint_to_fp Int1Regs:$a)),
- (CVT_f16_s32 (SELP_u32ii 1, 0, Int1Regs:$a), CvtRN)>;
-def : Pat<(f16 (sint_to_fp Int16Regs:$a)),
- (CVT_f16_s16 Int16Regs:$a, CvtRN)>;
-def : Pat<(f16 (sint_to_fp Int32Regs:$a)),
- (CVT_f16_s32 Int32Regs:$a, CvtRN)>;
-def : Pat<(f16 (sint_to_fp Int64Regs:$a)),
- (CVT_f16_s64 Int64Regs:$a, CvtRN)>;
-
-// uint -> f16
-def : Pat<(f16 (uint_to_fp Int1Regs:$a)),
- (CVT_f16_u32 (SELP_u32ii 1, 0, Int1Regs:$a), CvtRN)>;
-def : Pat<(f16 (uint_to_fp Int16Regs:$a)),
- (CVT_f16_u16 Int16Regs:$a, CvtRN)>;
-def : Pat<(f16 (uint_to_fp Int32Regs:$a)),
- (CVT_f16_u32 Int32Regs:$a, CvtRN)>;
-def : Pat<(f16 (uint_to_fp Int64Regs:$a)),
- (CVT_f16_u64 Int64Regs:$a, CvtRN)>;
-
-// sint -> f32
-def : Pat<(f32 (sint_to_fp Int1Regs:$a)),
- (CVT_f32_s32 (SELP_u32ii 1, 0, Int1Regs:$a), CvtRN)>;
-def : Pat<(f32 (sint_to_fp Int16Regs:$a)),
- (CVT_f32_s16 Int16Regs:$a, CvtRN)>;
-def : Pat<(f32 (sint_to_fp Int32Regs:$a)),
- (CVT_f32_s32 Int32Regs:$a, CvtRN)>;
-def : Pat<(f32 (sint_to_fp Int64Regs:$a)),
- (CVT_f32_s64 Int64Regs:$a, CvtRN)>;
-
-// uint -> f32
-def : Pat<(f32 (uint_to_fp Int1Regs:$a)),
- (CVT_f32_u32 (SELP_u32ii 1, 0, Int1Regs:$a), CvtRN)>;
-def : Pat<(f32 (uint_to_fp Int16Regs:$a)),
- (CVT_f32_u16 Int16Regs:$a, CvtRN)>;
-def : Pat<(f32 (uint_to_fp Int32Regs:$a)),
- (CVT_f32_u32 Int32Regs:$a, CvtRN)>;
-def : Pat<(f32 (uint_to_fp Int64Regs:$a)),
- (CVT_f32_u64 Int64Regs:$a, CvtRN)>;
-
-// sint -> f64
-def : Pat<(f64 (sint_to_fp Int1Regs:$a)),
- (CVT_f64_s32 (SELP_u32ii 1, 0, Int1Regs:$a), CvtRN)>;
-def : Pat<(f64 (sint_to_fp Int16Regs:$a)),
- (CVT_f64_s16 Int16Regs:$a, CvtRN)>;
-def : Pat<(f64 (sint_to_fp Int32Regs:$a)),
- (CVT_f64_s32 Int32Regs:$a, CvtRN)>;
-def : Pat<(f64 (sint_to_fp Int64Regs:$a)),
- (CVT_f64_s64 Int64Regs:$a, CvtRN)>;
-
-// uint -> f64
-def : Pat<(f64 (uint_to_fp Int1Regs:$a)),
- (CVT_f64_u32 (SELP_u32ii 1, 0, Int1Regs:$a), CvtRN)>;
-def : Pat<(f64 (uint_to_fp Int16Regs:$a)),
- (CVT_f64_u16 Int16Regs:$a, CvtRN)>;
-def : Pat<(f64 (uint_to_fp Int32Regs:$a)),
- (CVT_f64_u32 Int32Regs:$a, CvtRN)>;
-def : Pat<(f64 (uint_to_fp Int64Regs:$a)),
- (CVT_f64_u64 Int64Regs:$a, CvtRN)>;
-
-
-// f16 -> sint
-def : Pat<(i1 (fp_to_sint Float16Regs:$a)),
- (SETP_b16ri (BITCONVERT_16_F2I Float16Regs:$a), 0, CmpEQ)>;
-def : Pat<(i16 (fp_to_sint Float16Regs:$a)),
- (CVT_s16_f16 Float16Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(i16 (fp_to_sint Float16Regs:$a)),
- (CVT_s16_f16 Float16Regs:$a, CvtRZI)>;
-def : Pat<(i32 (fp_to_sint Float16Regs:$a)),
- (CVT_s32_f16 Float16Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(i32 (fp_to_sint Float16Regs:$a)),
- (CVT_s32_f16 Float16Regs:$a, CvtRZI)>;
-def : Pat<(i64 (fp_to_sint Float16Regs:$a)),
- (CVT_s64_f16 Float16Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(i64 (fp_to_sint Float16Regs:$a)),
- (CVT_s64_f16 Float16Regs:$a, CvtRZI)>;
-
-// f16 -> uint
-def : Pat<(i1 (fp_to_uint Float16Regs:$a)),
- (SETP_b16ri (BITCONVERT_16_F2I Float16Regs:$a), 0, CmpEQ)>;
-def : Pat<(i16 (fp_to_uint Float16Regs:$a)),
- (CVT_u16_f16 Float16Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(i16 (fp_to_uint Float16Regs:$a)),
- (CVT_u16_f16 Float16Regs:$a, CvtRZI)>;
-def : Pat<(i32 (fp_to_uint Float16Regs:$a)),
- (CVT_u32_f16 Float16Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(i32 (fp_to_uint Float16Regs:$a)),
- (CVT_u32_f16 Float16Regs:$a, CvtRZI)>;
-def : Pat<(i64 (fp_to_uint Float16Regs:$a)),
- (CVT_u64_f16 Float16Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(i64 (fp_to_uint Float16Regs:$a)),
- (CVT_u64_f16 Float16Regs:$a, CvtRZI)>;
-
-// f32 -> sint
-def : Pat<(i1 (fp_to_sint Float32Regs:$a)),
- (SETP_b32ri (BITCONVERT_32_F2I Float32Regs:$a), 0, CmpEQ)>;
-def : Pat<(i16 (fp_to_sint Float32Regs:$a)),
- (CVT_s16_f32 Float32Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(i16 (fp_to_sint Float32Regs:$a)),
- (CVT_s16_f32 Float32Regs:$a, CvtRZI)>;
-def : Pat<(i32 (fp_to_sint Float32Regs:$a)),
- (CVT_s32_f32 Float32Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(i32 (fp_to_sint Float32Regs:$a)),
- (CVT_s32_f32 Float32Regs:$a, CvtRZI)>;
-def : Pat<(i64 (fp_to_sint Float32Regs:$a)),
- (CVT_s64_f32 Float32Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(i64 (fp_to_sint Float32Regs:$a)),
- (CVT_s64_f32 Float32Regs:$a, CvtRZI)>;
-
-// f32 -> uint
-def : Pat<(i1 (fp_to_uint Float32Regs:$a)),
- (SETP_b32ri (BITCONVERT_32_F2I Float32Regs:$a), 0, CmpEQ)>;
-def : Pat<(i16 (fp_to_uint Float32Regs:$a)),
- (CVT_u16_f32 Float32Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(i16 (fp_to_uint Float32Regs:$a)),
- (CVT_u16_f32 Float32Regs:$a, CvtRZI)>;
-def : Pat<(i32 (fp_to_uint Float32Regs:$a)),
- (CVT_u32_f32 Float32Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(i32 (fp_to_uint Float32Regs:$a)),
- (CVT_u32_f32 Float32Regs:$a, CvtRZI)>;
-def : Pat<(i64 (fp_to_uint Float32Regs:$a)),
- (CVT_u64_f32 Float32Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(i64 (fp_to_uint Float32Regs:$a)),
- (CVT_u64_f32 Float32Regs:$a, CvtRZI)>;
-
-// f64 -> sint
-def : Pat<(i1 (fp_to_sint Float64Regs:$a)),
- (SETP_b64ri (BITCONVERT_64_F2I Float64Regs:$a), 0, CmpEQ)>;
-def : Pat<(i16 (fp_to_sint Float64Regs:$a)),
- (CVT_s16_f64 Float64Regs:$a, CvtRZI)>;
-def : Pat<(i32 (fp_to_sint Float64Regs:$a)),
- (CVT_s32_f64 Float64Regs:$a, CvtRZI)>;
-def : Pat<(i64 (fp_to_sint Float64Regs:$a)),
- (CVT_s64_f64 Float64Regs:$a, CvtRZI)>;
-
-// f64 -> uint
-def : Pat<(i1 (fp_to_uint Float64Regs:$a)),
- (SETP_b64ri (BITCONVERT_64_F2I Float64Regs:$a), 0, CmpEQ)>;
-def : Pat<(i16 (fp_to_uint Float64Regs:$a)),
- (CVT_u16_f64 Float64Regs:$a, CvtRZI)>;
-def : Pat<(i32 (fp_to_uint Float64Regs:$a)),
- (CVT_u32_f64 Float64Regs:$a, CvtRZI)>;
-def : Pat<(i64 (fp_to_uint Float64Regs:$a)),
- (CVT_u64_f64 Float64Regs:$a, CvtRZI)>;
-
-// sext i1
-def : Pat<(i16 (sext Int1Regs:$a)),
- (SELP_s16ii -1, 0, Int1Regs:$a)>;
-def : Pat<(i32 (sext Int1Regs:$a)),
- (SELP_s32ii -1, 0, Int1Regs:$a)>;
-def : Pat<(i64 (sext Int1Regs:$a)),
- (SELP_s64ii -1, 0, Int1Regs:$a)>;
-
-// zext i1
-def : Pat<(i16 (zext Int1Regs:$a)),
- (SELP_u16ii 1, 0, Int1Regs:$a)>;
-def : Pat<(i32 (zext Int1Regs:$a)),
- (SELP_u32ii 1, 0, Int1Regs:$a)>;
-def : Pat<(i64 (zext Int1Regs:$a)),
- (SELP_u64ii 1, 0, Int1Regs:$a)>;
-
-// anyext i1
-def : Pat<(i16 (anyext Int1Regs:$a)),
- (SELP_u16ii -1, 0, Int1Regs:$a)>;
-def : Pat<(i32 (anyext Int1Regs:$a)),
- (SELP_u32ii -1, 0, Int1Regs:$a)>;
-def : Pat<(i64 (anyext Int1Regs:$a)),
- (SELP_u64ii -1, 0, Int1Regs:$a)>;
-
-// sext i16
-def : Pat<(i32 (sext Int16Regs:$a)),
- (CVT_s32_s16 Int16Regs:$a, CvtNONE)>;
-def : Pat<(i64 (sext Int16Regs:$a)),
- (CVT_s64_s16 Int16Regs:$a, CvtNONE)>;
-
-// zext i16
-def : Pat<(i32 (zext Int16Regs:$a)),
- (CVT_u32_u16 Int16Regs:$a, CvtNONE)>;
-def : Pat<(i64 (zext Int16Regs:$a)),
- (CVT_u64_u16 Int16Regs:$a, CvtNONE)>;
-
-// anyext i16
-def : Pat<(i32 (anyext Int16Regs:$a)),
- (CVT_u32_u16 Int16Regs:$a, CvtNONE)>;
-def : Pat<(i64 (anyext Int16Regs:$a)),
- (CVT_u64_u16 Int16Regs:$a, CvtNONE)>;
-
-// sext i32
-def : Pat<(i64 (sext Int32Regs:$a)),
- (CVT_s64_s32 Int32Regs:$a, CvtNONE)>;
-
-// zext i32
-def : Pat<(i64 (zext Int32Regs:$a)),
- (CVT_u64_u32 Int32Regs:$a, CvtNONE)>;
-
-// anyext i32
-def : Pat<(i64 (anyext Int32Regs:$a)),
- (CVT_u64_u32 Int32Regs:$a, CvtNONE)>;
-
-
-// truncate i64
-def : Pat<(i32 (trunc Int64Regs:$a)),
- (CVT_u32_u64 Int64Regs:$a, CvtNONE)>;
-def : Pat<(i16 (trunc Int64Regs:$a)),
- (CVT_u16_u64 Int64Regs:$a, CvtNONE)>;
-def : Pat<(i1 (trunc Int64Regs:$a)),
- (SETP_b64ri (ANDb64ri Int64Regs:$a, 1), 1, CmpEQ)>;
-
-// truncate i32
-def : Pat<(i16 (trunc Int32Regs:$a)),
- (CVT_u16_u32 Int32Regs:$a, CvtNONE)>;
-def : Pat<(i1 (trunc Int32Regs:$a)),
- (SETP_b32ri (ANDb32ri Int32Regs:$a, 1), 1, CmpEQ)>;
-
-// truncate i16
-def : Pat<(i1 (trunc Int16Regs:$a)),
- (SETP_b16ri (ANDb16ri Int16Regs:$a, 1), 1, CmpEQ)>;
-
-// sext_inreg
-def : Pat<(sext_inreg Int16Regs:$a, i8), (CVT_INREG_s16_s8 Int16Regs:$a)>;
-def : Pat<(sext_inreg Int32Regs:$a, i8), (CVT_INREG_s32_s8 Int32Regs:$a)>;
-def : Pat<(sext_inreg Int32Regs:$a, i16), (CVT_INREG_s32_s16 Int32Regs:$a)>;
-def : Pat<(sext_inreg Int64Regs:$a, i8), (CVT_INREG_s64_s8 Int64Regs:$a)>;
-def : Pat<(sext_inreg Int64Regs:$a, i16), (CVT_INREG_s64_s16 Int64Regs:$a)>;
-def : Pat<(sext_inreg Int64Regs:$a, i32), (CVT_INREG_s64_s32 Int64Regs:$a)>;
-
-
-// Select instructions with 32-bit predicates
-def : Pat<(select Int32Regs:$pred, Int16Regs:$a, Int16Regs:$b),
- (SELP_b16rr Int16Regs:$a, Int16Regs:$b,
- (SETP_b32ri (ANDb32ri Int32Regs:$pred, 1), 1, CmpEQ))>;
-def : Pat<(select Int32Regs:$pred, Int32Regs:$a, Int32Regs:$b),
- (SELP_b32rr Int32Regs:$a, Int32Regs:$b,
- (SETP_b32ri (ANDb32ri Int32Regs:$pred, 1), 1, CmpEQ))>;
-def : Pat<(select Int32Regs:$pred, Int64Regs:$a, Int64Regs:$b),
- (SELP_b64rr Int64Regs:$a, Int64Regs:$b,
- (SETP_b32ri (ANDb32ri Int32Regs:$pred, 1), 1, CmpEQ))>;
-def : Pat<(select Int32Regs:$pred, Float16Regs:$a, Float16Regs:$b),
- (SELP_f16rr Float16Regs:$a, Float16Regs:$b,
- (SETP_b32ri (ANDb32ri Int32Regs:$pred, 1), 1, CmpEQ))>;
-def : Pat<(select Int32Regs:$pred, Float32Regs:$a, Float32Regs:$b),
- (SELP_f32rr Float32Regs:$a, Float32Regs:$b,
- (SETP_b32ri (ANDb32ri Int32Regs:$pred, 1), 1, CmpEQ))>;
-def : Pat<(select Int32Regs:$pred, Float64Regs:$a, Float64Regs:$b),
- (SELP_f64rr Float64Regs:$a, Float64Regs:$b,
- (SETP_b32ri (ANDb32ri Int32Regs:$pred, 1), 1, CmpEQ))>;
-
-
-let hasSideEffects = 0 in {
- // pack a set of smaller int registers to a larger int register
- def V4I16toI64 : NVPTXInst<(outs Int64Regs:$d),
- (ins Int16Regs:$s1, Int16Regs:$s2,
- Int16Regs:$s3, Int16Regs:$s4),
- "mov.b64 \t$d, {{$s1, $s2, $s3, $s4}};", []>;
- def V2I16toI32 : NVPTXInst<(outs Int32Regs:$d),
- (ins Int16Regs:$s1, Int16Regs:$s2),
- "mov.b32 \t$d, {{$s1, $s2}};", []>;
- def V2I32toI64 : NVPTXInst<(outs Int64Regs:$d),
- (ins Int32Regs:$s1, Int32Regs:$s2),
- "mov.b64 \t$d, {{$s1, $s2}};", []>;
- def V2F32toF64 : NVPTXInst<(outs Float64Regs:$d),
- (ins Float32Regs:$s1, Float32Regs:$s2),
- "mov.b64 \t$d, {{$s1, $s2}};", []>;
-
- // unpack a larger int register to a set of smaller int registers
- def I64toV4I16 : NVPTXInst<(outs Int16Regs:$d1, Int16Regs:$d2,
- Int16Regs:$d3, Int16Regs:$d4),
- (ins Int64Regs:$s),
- "mov.b64 \t{{$d1, $d2, $d3, $d4}}, $s;", []>;
- def I32toV2I16 : NVPTXInst<(outs Int16Regs:$d1, Int16Regs:$d2),
- (ins Int32Regs:$s),
- "mov.b32 \t{{$d1, $d2}}, $s;", []>;
- def I64toV2I32 : NVPTXInst<(outs Int32Regs:$d1, Int32Regs:$d2),
- (ins Int64Regs:$s),
- "mov.b64 \t{{$d1, $d2}}, $s;", []>;
- def F64toV2F32 : NVPTXInst<(outs Float32Regs:$d1, Float32Regs:$d2),
- (ins Float64Regs:$s),
- "mov.b64 \t{{$d1, $d2}}, $s;", []>;
-
-}
-
-let hasSideEffects = 0 in {
- // Extract element of f16x2 register. PTX does not provide any way
- // to access elements of f16x2 vector directly, so we need to
- // extract it using a temporary register.
- def F16x2toF16_0 : NVPTXInst<(outs Float16Regs:$dst),
- (ins Float16x2Regs:$src),
- "{{ .reg .b16 \t%tmp_hi;\n\t"
- " mov.b32 \t{$dst, %tmp_hi}, $src; }}",
- [(set Float16Regs:$dst,
- (extractelt (v2f16 Float16x2Regs:$src), 0))]>;
- def F16x2toF16_1 : NVPTXInst<(outs Float16Regs:$dst),
- (ins Float16x2Regs:$src),
- "{{ .reg .b16 \t%tmp_lo;\n\t"
- " mov.b32 \t{%tmp_lo, $dst}, $src; }}",
- [(set Float16Regs:$dst,
- (extractelt (v2f16 Float16x2Regs:$src), 1))]>;
-
- // Coalesce two f16 registers into f16x2
- def BuildF16x2 : NVPTXInst<(outs Float16x2Regs:$dst),
- (ins Float16Regs:$a, Float16Regs:$b),
- "mov.b32 \t$dst, {{$a, $b}};",
- [(set Float16x2Regs:$dst,
- (build_vector (f16 Float16Regs:$a), (f16 Float16Regs:$b)))]>;
-
- // Directly initializing underlying the b32 register is one less SASS
- // instruction than than vector-packing move.
- def BuildF16x2i : NVPTXInst<(outs Float16x2Regs:$dst), (ins i32imm:$src),
- "mov.b32 \t$dst, $src;",
- []>;
-
- // Split f16x2 into two f16 registers.
- def SplitF16x2 : NVPTXInst<(outs Float16Regs:$lo, Float16Regs:$hi),
- (ins Float16x2Regs:$src),
- "mov.b32 \t{{$lo, $hi}}, $src;",
- []>;
- // Split an i32 into two f16
- def SplitI32toF16x2 : NVPTXInst<(outs Float16Regs:$lo, Float16Regs:$hi),
- (ins Int32Regs:$src),
- "mov.b32 \t{{$lo, $hi}}, $src;",
- []>;
-}
-
-// Count leading zeros
-let hasSideEffects = 0 in {
- def CLZr32 : NVPTXInst<(outs Int32Regs:$d), (ins Int32Regs:$a),
- "clz.b32 \t$d, $a;", []>;
- def CLZr64 : NVPTXInst<(outs Int32Regs:$d), (ins Int64Regs:$a),
- "clz.b64 \t$d, $a;", []>;
-}
-
-// 32-bit has a direct PTX instruction
-def : Pat<(ctlz Int32Regs:$a), (CLZr32 Int32Regs:$a)>;
-
-// The return type of the ctlz ISD node is the same as its input, but the PTX
-// ctz instruction always returns a 32-bit value. For ctlz.i64, convert the
-// ptx value to 64 bits to match the ISD node's semantics, unless we know we're
-// truncating back down to 32 bits.
-def : Pat<(ctlz Int64Regs:$a), (CVT_u64_u32 (CLZr64 Int64Regs:$a), CvtNONE)>;
-def : Pat<(i32 (trunc (ctlz Int64Regs:$a))), (CLZr64 Int64Regs:$a)>;
-
-// For 16-bit ctlz, we zero-extend to 32-bit, perform the count, then trunc the
-// result back to 16-bits if necessary. We also need to subtract 16 because
-// the high-order 16 zeros were counted.
-//
-// TODO: NVPTX has a mov.b32 b32reg, {imm, b16reg} instruction, which we could
-// use to save one SASS instruction (on sm_35 anyway):
-//
-// mov.b32 $tmp, {0xffff, $a}
-// ctlz.b32 $result, $tmp
-//
-// That is, instead of zero-extending the input to 32 bits, we'd "one-extend"
-// and then ctlz that value. This way we don't have to subtract 16 from the
-// result. Unfortunately today we don't have a way to generate
-// "mov b32reg, {b16imm, b16reg}", so we don't do this optimization.
-def : Pat<(ctlz Int16Regs:$a),
- (SUBi16ri (CVT_u16_u32
- (CLZr32 (CVT_u32_u16 Int16Regs:$a, CvtNONE)), CvtNONE), 16)>;
-def : Pat<(i32 (zext (ctlz Int16Regs:$a))),
- (SUBi32ri (CLZr32 (CVT_u32_u16 Int16Regs:$a, CvtNONE)), 16)>;
-
-// Population count
-let hasSideEffects = 0 in {
- def POPCr32 : NVPTXInst<(outs Int32Regs:$d), (ins Int32Regs:$a),
- "popc.b32 \t$d, $a;", []>;
- def POPCr64 : NVPTXInst<(outs Int32Regs:$d), (ins Int64Regs:$a),
- "popc.b64 \t$d, $a;", []>;
-}
-
-// 32-bit has a direct PTX instruction
-def : Pat<(ctpop Int32Regs:$a), (POPCr32 Int32Regs:$a)>;
-
-// For 64-bit, the result in PTX is actually 32-bit so we zero-extend to 64-bit
-// to match the LLVM semantics. Just as with ctlz.i64, we provide a second
-// pattern that avoids the type conversion if we're truncating the result to
-// i32 anyway.
-def : Pat<(ctpop Int64Regs:$a), (CVT_u64_u32 (POPCr64 Int64Regs:$a), CvtNONE)>;
-def : Pat<(i32 (trunc (ctpop Int64Regs:$a))), (POPCr64 Int64Regs:$a)>;
-
-// For 16-bit, we zero-extend to 32-bit, then trunc the result back to 16-bits.
-// If we know that we're storing into an i32, we can avoid the final trunc.
-def : Pat<(ctpop Int16Regs:$a),
- (CVT_u16_u32 (POPCr32 (CVT_u32_u16 Int16Regs:$a, CvtNONE)), CvtNONE)>;
-def : Pat<(i32 (zext (ctpop Int16Regs:$a))),
- (POPCr32 (CVT_u32_u16 Int16Regs:$a, CvtNONE))>;
-
-// fpround f32 -> f16
-def : Pat<(f16 (fpround Float32Regs:$a)),
- (CVT_f16_f32 Float32Regs:$a, CvtRN_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(f16 (fpround Float32Regs:$a)),
- (CVT_f16_f32 Float32Regs:$a, CvtRN)>;
-
-// fpround f64 -> f16
-def : Pat<(f16 (fpround Float64Regs:$a)),
- (CVT_f16_f64 Float64Regs:$a, CvtRN_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(f16 (fpround Float64Regs:$a)),
- (CVT_f16_f64 Float64Regs:$a, CvtRN)>;
-
-// fpround f64 -> f32
-def : Pat<(f32 (fpround Float64Regs:$a)),
- (CVT_f32_f64 Float64Regs:$a, CvtRN_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(f32 (fpround Float64Regs:$a)),
- (CVT_f32_f64 Float64Regs:$a, CvtRN)>;
-
-// fpextend f16 -> f32
-def : Pat<(f32 (fpextend Float16Regs:$a)),
- (CVT_f32_f16 Float16Regs:$a, CvtNONE_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(f32 (fpextend Float16Regs:$a)),
- (CVT_f32_f16 Float16Regs:$a, CvtNONE)>;
-
-// fpextend f16 -> f64
-def : Pat<(f64 (fpextend Float16Regs:$a)),
- (CVT_f64_f16 Float16Regs:$a, CvtNONE_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(f64 (fpextend Float16Regs:$a)),
- (CVT_f64_f16 Float16Regs:$a, CvtNONE)>;
-
-// fpextend f32 -> f64
-def : Pat<(f64 (fpextend Float32Regs:$a)),
- (CVT_f64_f32 Float32Regs:$a, CvtNONE_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(f64 (fpextend Float32Regs:$a)),
- (CVT_f64_f32 Float32Regs:$a, CvtNONE)>;
-
-def retflag : SDNode<"NVPTXISD::RET_FLAG", SDTNone,
- [SDNPHasChain, SDNPOptInGlue]>;
-
-// fceil, ffloor, fround, ftrunc.
-
-def : Pat<(fceil Float16Regs:$a),
- (CVT_f16_f16 Float16Regs:$a, CvtRPI_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(fceil Float16Regs:$a),
- (CVT_f16_f16 Float16Regs:$a, CvtRPI)>, Requires<[doNoF32FTZ]>;
-def : Pat<(fceil Float32Regs:$a),
- (CVT_f32_f32 Float32Regs:$a, CvtRPI_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(fceil Float32Regs:$a),
- (CVT_f32_f32 Float32Regs:$a, CvtRPI)>, Requires<[doNoF32FTZ]>;
-def : Pat<(fceil Float64Regs:$a),
- (CVT_f64_f64 Float64Regs:$a, CvtRPI)>;
-
-def : Pat<(ffloor Float16Regs:$a),
- (CVT_f16_f16 Float16Regs:$a, CvtRMI_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(ffloor Float16Regs:$a),
- (CVT_f16_f16 Float16Regs:$a, CvtRMI)>, Requires<[doNoF32FTZ]>;
-def : Pat<(ffloor Float32Regs:$a),
- (CVT_f32_f32 Float32Regs:$a, CvtRMI_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(ffloor Float32Regs:$a),
- (CVT_f32_f32 Float32Regs:$a, CvtRMI)>, Requires<[doNoF32FTZ]>;
-def : Pat<(ffloor Float64Regs:$a),
- (CVT_f64_f64 Float64Regs:$a, CvtRMI)>;
-
-def : Pat<(fround Float16Regs:$a),
- (CVT_f16_f16 Float16Regs:$a, CvtRNI_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(f16 (fround Float16Regs:$a)),
- (CVT_f16_f16 Float16Regs:$a, CvtRNI)>, Requires<[doNoF32FTZ]>;
-def : Pat<(fround Float32Regs:$a),
- (CVT_f32_f32 Float32Regs:$a, CvtRNI_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(f32 (fround Float32Regs:$a)),
- (CVT_f32_f32 Float32Regs:$a, CvtRNI)>, Requires<[doNoF32FTZ]>;
-def : Pat<(f64 (fround Float64Regs:$a)),
- (CVT_f64_f64 Float64Regs:$a, CvtRNI)>;
-
-def : Pat<(ftrunc Float16Regs:$a),
- (CVT_f16_f16 Float16Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(ftrunc Float16Regs:$a),
- (CVT_f16_f16 Float16Regs:$a, CvtRZI)>, Requires<[doNoF32FTZ]>;
-def : Pat<(ftrunc Float32Regs:$a),
- (CVT_f32_f32 Float32Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(ftrunc Float32Regs:$a),
- (CVT_f32_f32 Float32Regs:$a, CvtRZI)>, Requires<[doNoF32FTZ]>;
-def : Pat<(ftrunc Float64Regs:$a),
- (CVT_f64_f64 Float64Regs:$a, CvtRZI)>;
-
-// nearbyint and rint are implemented as rounding to nearest even. This isn't
-// strictly correct, because it causes us to ignore the rounding mode. But it
-// matches what CUDA's "libm" does.
-
-def : Pat<(fnearbyint Float16Regs:$a),
- (CVT_f16_f16 Float16Regs:$a, CvtRNI_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(fnearbyint Float16Regs:$a),
- (CVT_f16_f16 Float16Regs:$a, CvtRNI)>, Requires<[doNoF32FTZ]>;
-def : Pat<(fnearbyint Float32Regs:$a),
- (CVT_f32_f32 Float32Regs:$a, CvtRNI_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(fnearbyint Float32Regs:$a),
- (CVT_f32_f32 Float32Regs:$a, CvtRNI)>, Requires<[doNoF32FTZ]>;
-def : Pat<(fnearbyint Float64Regs:$a),
- (CVT_f64_f64 Float64Regs:$a, CvtRNI)>;
-
-def : Pat<(frint Float16Regs:$a),
- (CVT_f16_f16 Float16Regs:$a, CvtRNI_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(frint Float16Regs:$a),
- (CVT_f16_f16 Float16Regs:$a, CvtRNI)>, Requires<[doNoF32FTZ]>;
-def : Pat<(frint Float32Regs:$a),
- (CVT_f32_f32 Float32Regs:$a, CvtRNI_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(frint Float32Regs:$a),
- (CVT_f32_f32 Float32Regs:$a, CvtRNI)>, Requires<[doNoF32FTZ]>;
-def : Pat<(frint Float64Regs:$a),
- (CVT_f64_f64 Float64Regs:$a, CvtRNI)>;
-
-
-//-----------------------------------
-// Control-flow
-//-----------------------------------
-
-let isTerminator=1 in {
- let isReturn=1, isBarrier=1 in
- def Return : NVPTXInst<(outs), (ins), "ret;", [(retflag)]>;
-
- let isBranch=1 in
- def CBranch : NVPTXInst<(outs), (ins Int1Regs:$a, brtarget:$target),
- "@$a bra \t$target;",
- [(brcond Int1Regs:$a, bb:$target)]>;
- let isBranch=1 in
- def CBranchOther : NVPTXInst<(outs), (ins Int1Regs:$a, brtarget:$target),
- "@!$a bra \t$target;", []>;
-
- let isBranch=1, isBarrier=1 in
- def GOTO : NVPTXInst<(outs), (ins brtarget:$target),
- "bra.uni \t$target;", [(br bb:$target)]>;
-}
-
-def : Pat<(brcond Int32Regs:$a, bb:$target),
- (CBranch (SETP_u32ri Int32Regs:$a, 0, CmpNE), bb:$target)>;
-
-// SelectionDAGBuilder::visitSWitchCase() will invert the condition of a
-// conditional branch if the target block is the next block so that the code
-// can fall through to the target block. The invertion is done by 'xor
-// condition, 1', which will be translated to (setne condition, -1). Since ptx
-// supports '@!pred bra target', we should use it.
-def : Pat<(brcond (i1 (setne Int1Regs:$a, -1)), bb:$target),
- (CBranchOther Int1Regs:$a, bb:$target)>;
-
-// Call
-def SDT_NVPTXCallSeqStart : SDCallSeqStart<[SDTCisVT<0, i32>]>;
-def SDT_NVPTXCallSeqEnd : SDCallSeqEnd<[SDTCisVT<0, i32>, SDTCisVT<1, i32>]>;
-
-def callseq_start : SDNode<"ISD::CALLSEQ_START", SDT_NVPTXCallSeqStart,
- [SDNPHasChain, SDNPOutGlue, SDNPSideEffect]>;
-def callseq_end : SDNode<"ISD::CALLSEQ_END", SDT_NVPTXCallSeqEnd,
- [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
- SDNPSideEffect]>;
-
-def SDT_NVPTXCall : SDTypeProfile<0, 1, [SDTCisVT<0, i32>]>;
-def call : SDNode<"NVPTXISD::CALL", SDT_NVPTXCall,
- [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
-def calltarget : Operand<i32>;
-let isCall=1 in {
- def CALL : NVPTXInst<(outs), (ins calltarget:$dst), "call \t$dst, (1);", []>;
-}
-
-def : Pat<(call tglobaladdr:$dst), (CALL tglobaladdr:$dst)>;
-def : Pat<(call texternalsym:$dst), (CALL texternalsym:$dst)>;
-
-// Pseudo instructions.
-class Pseudo<dag outs, dag ins, string asmstr, list<dag> pattern>
- : NVPTXInst<outs, ins, asmstr, pattern>;
-
-def Callseq_Start :
- NVPTXInst<(outs), (ins i32imm:$amt),
- "\\{ // callseq $amt\n"
- "\t.reg .b32 temp_param_reg;",
- [(callseq_start timm:$amt)]>;
-def Callseq_End :
- NVPTXInst<(outs), (ins i32imm:$amt1, i32imm:$amt2),
- "\\} // callseq $amt1",
- [(callseq_end timm:$amt1, timm:$amt2)]>;
-
-// trap instruction
-def trapinst : NVPTXInst<(outs), (ins), "trap;", [(trap)]>;
-
-// Call prototype wrapper
-def SDTCallPrototype : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
-def CallPrototype :
- SDNode<"NVPTXISD::CallPrototype", SDTCallPrototype,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def ProtoIdent : Operand<i32> {
- let PrintMethod = "printProtoIdent";
-}
-def CALL_PROTOTYPE :
- NVPTXInst<(outs), (ins ProtoIdent:$ident),
- "$ident", [(CallPrototype (i32 texternalsym:$ident))]>;
-
-
-include "NVPTXIntrinsics.td"
-
-
-//-----------------------------------
-// Notes
-//-----------------------------------
-// BSWAP is currently expanded. The following is a more efficient
-// - for < sm_20, use vector scalar mov, as tesla support native 16-bit register
-// - for sm_20, use pmpt (use vector scalar mov to get the pack and
-// unpack). sm_20 supports native 32-bit register, but not native 16-bit
-// register.
+//===- NVPTXInstrInfo.td - NVPTX Instruction defs -------------*- tblgen-*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes the PTX instructions in TableGen format.
+//
+//===----------------------------------------------------------------------===//
+
+include "NVPTXInstrFormats.td"
+
+// A NOP instruction
+let hasSideEffects = 0 in {
+ def NOP : NVPTXInst<(outs), (ins), "", []>;
+}
+
+let OperandType = "OPERAND_IMMEDIATE" in {
+ def f16imm : Operand<f16>;
+}
+
+// List of vector specific properties
+def isVecLD : VecInstTypeEnum<1>;
+def isVecST : VecInstTypeEnum<2>;
+def isVecBuild : VecInstTypeEnum<3>;
+def isVecShuffle : VecInstTypeEnum<4>;
+def isVecExtract : VecInstTypeEnum<5>;
+def isVecInsert : VecInstTypeEnum<6>;
+def isVecDest : VecInstTypeEnum<7>;
+def isVecOther : VecInstTypeEnum<15>;
+
+//===----------------------------------------------------------------------===//
+// NVPTX Operand Definitions.
+//===----------------------------------------------------------------------===//
+
+def brtarget : Operand<OtherVT>;
+
+// CVT conversion modes
+// These must match the enum in NVPTX.h
+def CvtNONE : PatLeaf<(i32 0x0)>;
+def CvtRNI : PatLeaf<(i32 0x1)>;
+def CvtRZI : PatLeaf<(i32 0x2)>;
+def CvtRMI : PatLeaf<(i32 0x3)>;
+def CvtRPI : PatLeaf<(i32 0x4)>;
+def CvtRN : PatLeaf<(i32 0x5)>;
+def CvtRZ : PatLeaf<(i32 0x6)>;
+def CvtRM : PatLeaf<(i32 0x7)>;
+def CvtRP : PatLeaf<(i32 0x8)>;
+
+def CvtNONE_FTZ : PatLeaf<(i32 0x10)>;
+def CvtRNI_FTZ : PatLeaf<(i32 0x11)>;
+def CvtRZI_FTZ : PatLeaf<(i32 0x12)>;
+def CvtRMI_FTZ : PatLeaf<(i32 0x13)>;
+def CvtRPI_FTZ : PatLeaf<(i32 0x14)>;
+def CvtRN_FTZ : PatLeaf<(i32 0x15)>;
+def CvtRZ_FTZ : PatLeaf<(i32 0x16)>;
+def CvtRM_FTZ : PatLeaf<(i32 0x17)>;
+def CvtRP_FTZ : PatLeaf<(i32 0x18)>;
+
+def CvtSAT : PatLeaf<(i32 0x20)>;
+def CvtSAT_FTZ : PatLeaf<(i32 0x30)>;
+
+def CvtMode : Operand<i32> {
+ let PrintMethod = "printCvtMode";
+}
+
+// Compare modes
+// These must match the enum in NVPTX.h
+def CmpEQ : PatLeaf<(i32 0)>;
+def CmpNE : PatLeaf<(i32 1)>;
+def CmpLT : PatLeaf<(i32 2)>;
+def CmpLE : PatLeaf<(i32 3)>;
+def CmpGT : PatLeaf<(i32 4)>;
+def CmpGE : PatLeaf<(i32 5)>;
+def CmpEQU : PatLeaf<(i32 10)>;
+def CmpNEU : PatLeaf<(i32 11)>;
+def CmpLTU : PatLeaf<(i32 12)>;
+def CmpLEU : PatLeaf<(i32 13)>;
+def CmpGTU : PatLeaf<(i32 14)>;
+def CmpGEU : PatLeaf<(i32 15)>;
+def CmpNUM : PatLeaf<(i32 16)>;
+def CmpNAN : PatLeaf<(i32 17)>;
+
+def CmpEQ_FTZ : PatLeaf<(i32 0x100)>;
+def CmpNE_FTZ : PatLeaf<(i32 0x101)>;
+def CmpLT_FTZ : PatLeaf<(i32 0x102)>;
+def CmpLE_FTZ : PatLeaf<(i32 0x103)>;
+def CmpGT_FTZ : PatLeaf<(i32 0x104)>;
+def CmpGE_FTZ : PatLeaf<(i32 0x105)>;
+def CmpEQU_FTZ : PatLeaf<(i32 0x10A)>;
+def CmpNEU_FTZ : PatLeaf<(i32 0x10B)>;
+def CmpLTU_FTZ : PatLeaf<(i32 0x10C)>;
+def CmpLEU_FTZ : PatLeaf<(i32 0x10D)>;
+def CmpGTU_FTZ : PatLeaf<(i32 0x10E)>;
+def CmpGEU_FTZ : PatLeaf<(i32 0x10F)>;
+def CmpNUM_FTZ : PatLeaf<(i32 0x110)>;
+def CmpNAN_FTZ : PatLeaf<(i32 0x111)>;
+
+def CmpMode : Operand<i32> {
+ let PrintMethod = "printCmpMode";
+}
+def VecElement : Operand<i32> {
+ let PrintMethod = "printVecElement";
+}
+
+//===----------------------------------------------------------------------===//
+// NVPTX Instruction Predicate Definitions
+//===----------------------------------------------------------------------===//
+
+
+def hasAtomRedG32 : Predicate<"Subtarget->hasAtomRedG32()">;
+def hasAtomRedS32 : Predicate<"Subtarget->hasAtomRedS32()">;
+def hasAtomRedGen32 : Predicate<"Subtarget->hasAtomRedGen32()">;
+def useAtomRedG32forGen32 :
+ Predicate<"!Subtarget->hasAtomRedGen32() && Subtarget->hasAtomRedG32()">;
+def hasBrkPt : Predicate<"Subtarget->hasBrkPt()">;
+def hasAtomRedG64 : Predicate<"Subtarget->hasAtomRedG64()">;
+def hasAtomRedS64 : Predicate<"Subtarget->hasAtomRedS64()">;
+def hasAtomRedGen64 : Predicate<"Subtarget->hasAtomRedGen64()">;
+def useAtomRedG64forGen64 :
+ Predicate<"!Subtarget->hasAtomRedGen64() && Subtarget->hasAtomRedG64()">;
+def hasAtomAddF32 : Predicate<"Subtarget->hasAtomAddF32()">;
+def hasAtomAddF64 : Predicate<"Subtarget->hasAtomAddF64()">;
+def hasAtomScope : Predicate<"Subtarget->hasAtomScope()">;
+def hasAtomBitwise64 : Predicate<"Subtarget->hasAtomBitwise64()">;
+def hasAtomMinMax64 : Predicate<"Subtarget->hasAtomMinMax64()">;
+def hasVote : Predicate<"Subtarget->hasVote()">;
+def hasDouble : Predicate<"Subtarget->hasDouble()">;
+def reqPTX20 : Predicate<"Subtarget->reqPTX20()">;
+def hasLDG : Predicate<"Subtarget->hasLDG()">;
+def hasLDU : Predicate<"Subtarget->hasLDU()">;
+def hasGenericLdSt : Predicate<"Subtarget->hasGenericLdSt()">;
+
+def doF32FTZ : Predicate<"useF32FTZ()">;
+def doNoF32FTZ : Predicate<"!useF32FTZ()">;
+
+def doMulWide : Predicate<"doMulWide">;
+
+def allowFMA : Predicate<"allowFMA()">;
+def noFMA : Predicate<"!allowFMA()">;
+def allowUnsafeFPMath : Predicate<"allowUnsafeFPMath()">;
+
+def do_DIVF32_APPROX : Predicate<"getDivF32Level()==0">;
+def do_DIVF32_FULL : Predicate<"getDivF32Level()==1">;
+
+def do_SQRTF32_APPROX : Predicate<"!usePrecSqrtF32()">;
+def do_SQRTF32_RN : Predicate<"usePrecSqrtF32()">;
+
+def hasHWROT32 : Predicate<"Subtarget->hasHWROT32()">;
+def noHWROT32 : Predicate<"!Subtarget->hasHWROT32()">;
+
+def true : Predicate<"true">;
+
+def hasPTX31 : Predicate<"Subtarget->getPTXVersion() >= 31">;
+
+def useFP16Math: Predicate<"Subtarget->allowFP16Math()">;
+
+//===----------------------------------------------------------------------===//
+// Some Common Instruction Class Templates
+//===----------------------------------------------------------------------===//
+
+// Template for instructions which take three int64, int32, or int16 args.
+// The instructions are named "<OpcStr><Width>" (e.g. "add.s64").
+multiclass I3<string OpcStr, SDNode OpNode> {
+ def i64rr :
+ NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, Int64Regs:$b),
+ !strconcat(OpcStr, "64 \t$dst, $a, $b;"),
+ [(set Int64Regs:$dst, (OpNode Int64Regs:$a, Int64Regs:$b))]>;
+ def i64ri :
+ NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, i64imm:$b),
+ !strconcat(OpcStr, "64 \t$dst, $a, $b;"),
+ [(set Int64Regs:$dst, (OpNode Int64Regs:$a, imm:$b))]>;
+ def i32rr :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),
+ !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
+ [(set Int32Regs:$dst, (OpNode Int32Regs:$a, Int32Regs:$b))]>;
+ def i32ri :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
+ !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
+ [(set Int32Regs:$dst, (OpNode Int32Regs:$a, imm:$b))]>;
+ def i16rr :
+ NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, Int16Regs:$b),
+ !strconcat(OpcStr, "16 \t$dst, $a, $b;"),
+ [(set Int16Regs:$dst, (OpNode Int16Regs:$a, Int16Regs:$b))]>;
+ def i16ri :
+ NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, i16imm:$b),
+ !strconcat(OpcStr, "16 \t$dst, $a, $b;"),
+ [(set Int16Regs:$dst, (OpNode Int16Regs:$a, (imm):$b))]>;
+}
+
+// Template for instructions which take 3 int32 args. The instructions are
+// named "<OpcStr>.s32" (e.g. "addc.cc.s32").
+multiclass ADD_SUB_INT_32<string OpcStr, SDNode OpNode> {
+ def i32rr :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),
+ !strconcat(OpcStr, ".s32 \t$dst, $a, $b;"),
+ [(set Int32Regs:$dst, (OpNode Int32Regs:$a, Int32Regs:$b))]>;
+ def i32ri :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
+ !strconcat(OpcStr, ".s32 \t$dst, $a, $b;"),
+ [(set Int32Regs:$dst, (OpNode Int32Regs:$a, imm:$b))]>;
+}
+
+// Template for instructions which take three fp64 or fp32 args. The
+// instructions are named "<OpcStr>.f<Width>" (e.g. "min.f64").
+//
+// Also defines ftz (flush subnormal inputs and results to sign-preserving
+// zero) variants for fp32 functions.
+//
+// This multiclass should be used for nodes that cannot be folded into FMAs.
+// For nodes that can be folded into FMAs (i.e. adds and muls), use
+// F3_fma_component.
+multiclass F3<string OpcStr, SDNode OpNode> {
+ def f64rr :
+ NVPTXInst<(outs Float64Regs:$dst),
+ (ins Float64Regs:$a, Float64Regs:$b),
+ !strconcat(OpcStr, ".f64 \t$dst, $a, $b;"),
+ [(set Float64Regs:$dst, (OpNode Float64Regs:$a, Float64Regs:$b))]>;
+ def f64ri :
+ NVPTXInst<(outs Float64Regs:$dst),
+ (ins Float64Regs:$a, f64imm:$b),
+ !strconcat(OpcStr, ".f64 \t$dst, $a, $b;"),
+ [(set Float64Regs:$dst, (OpNode Float64Regs:$a, fpimm:$b))]>;
+ def f32rr_ftz :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, Float32Regs:$b),
+ !strconcat(OpcStr, ".ftz.f32 \t$dst, $a, $b;"),
+ [(set Float32Regs:$dst, (OpNode Float32Regs:$a, Float32Regs:$b))]>,
+ Requires<[doF32FTZ]>;
+ def f32ri_ftz :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, f32imm:$b),
+ !strconcat(OpcStr, ".ftz.f32 \t$dst, $a, $b;"),
+ [(set Float32Regs:$dst, (OpNode Float32Regs:$a, fpimm:$b))]>,
+ Requires<[doF32FTZ]>;
+ def f32rr :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, Float32Regs:$b),
+ !strconcat(OpcStr, ".f32 \t$dst, $a, $b;"),
+ [(set Float32Regs:$dst, (OpNode Float32Regs:$a, Float32Regs:$b))]>;
+ def f32ri :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, f32imm:$b),
+ !strconcat(OpcStr, ".f32 \t$dst, $a, $b;"),
+ [(set Float32Regs:$dst, (OpNode Float32Regs:$a, fpimm:$b))]>;
+}
+
+// Template for instructions which take three FP args. The
+// instructions are named "<OpcStr>.f<Width>" (e.g. "add.f64").
+//
+// Also defines ftz (flush subnormal inputs and results to sign-preserving
+// zero) variants for fp32/fp16 functions.
+//
+// This multiclass should be used for nodes that can be folded to make fma ops.
+// In this case, we use the ".rn" variant when FMA is disabled, as this behaves
+// just like the non ".rn" op, but prevents ptxas from creating FMAs.
+multiclass F3_fma_component<string OpcStr, SDNode OpNode> {
+ def f64rr :
+ NVPTXInst<(outs Float64Regs:$dst),
+ (ins Float64Regs:$a, Float64Regs:$b),
+ !strconcat(OpcStr, ".f64 \t$dst, $a, $b;"),
+ [(set Float64Regs:$dst, (OpNode Float64Regs:$a, Float64Regs:$b))]>,
+ Requires<[allowFMA]>;
+ def f64ri :
+ NVPTXInst<(outs Float64Regs:$dst),
+ (ins Float64Regs:$a, f64imm:$b),
+ !strconcat(OpcStr, ".f64 \t$dst, $a, $b;"),
+ [(set Float64Regs:$dst, (OpNode Float64Regs:$a, fpimm:$b))]>,
+ Requires<[allowFMA]>;
+ def f32rr_ftz :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, Float32Regs:$b),
+ !strconcat(OpcStr, ".ftz.f32 \t$dst, $a, $b;"),
+ [(set Float32Regs:$dst, (OpNode Float32Regs:$a, Float32Regs:$b))]>,
+ Requires<[allowFMA, doF32FTZ]>;
+ def f32ri_ftz :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, f32imm:$b),
+ !strconcat(OpcStr, ".ftz.f32 \t$dst, $a, $b;"),
+ [(set Float32Regs:$dst, (OpNode Float32Regs:$a, fpimm:$b))]>,
+ Requires<[allowFMA, doF32FTZ]>;
+ def f32rr :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, Float32Regs:$b),
+ !strconcat(OpcStr, ".f32 \t$dst, $a, $b;"),
+ [(set Float32Regs:$dst, (OpNode Float32Regs:$a, Float32Regs:$b))]>,
+ Requires<[allowFMA]>;
+ def f32ri :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, f32imm:$b),
+ !strconcat(OpcStr, ".f32 \t$dst, $a, $b;"),
+ [(set Float32Regs:$dst, (OpNode Float32Regs:$a, fpimm:$b))]>,
+ Requires<[allowFMA]>;
+
+ def f16rr_ftz :
+ NVPTXInst<(outs Float16Regs:$dst),
+ (ins Float16Regs:$a, Float16Regs:$b),
+ !strconcat(OpcStr, ".ftz.f16 \t$dst, $a, $b;"),
+ [(set Float16Regs:$dst, (OpNode Float16Regs:$a, Float16Regs:$b))]>,
+ Requires<[useFP16Math, allowFMA, doF32FTZ]>;
+ def f16rr :
+ NVPTXInst<(outs Float16Regs:$dst),
+ (ins Float16Regs:$a, Float16Regs:$b),
+ !strconcat(OpcStr, ".f16 \t$dst, $a, $b;"),
+ [(set Float16Regs:$dst, (OpNode Float16Regs:$a, Float16Regs:$b))]>,
+ Requires<[useFP16Math, allowFMA]>;
+
+ def f16x2rr_ftz :
+ NVPTXInst<(outs Float16x2Regs:$dst),
+ (ins Float16x2Regs:$a, Float16x2Regs:$b),
+ !strconcat(OpcStr, ".ftz.f16x2 \t$dst, $a, $b;"),
+ [(set Float16x2Regs:$dst, (OpNode Float16x2Regs:$a, Float16x2Regs:$b))]>,
+ Requires<[useFP16Math, allowFMA, doF32FTZ]>;
+ def f16x2rr :
+ NVPTXInst<(outs Float16x2Regs:$dst),
+ (ins Float16x2Regs:$a, Float16x2Regs:$b),
+ !strconcat(OpcStr, ".f16x2 \t$dst, $a, $b;"),
+ [(set Float16x2Regs:$dst, (OpNode Float16x2Regs:$a, Float16x2Regs:$b))]>,
+ Requires<[useFP16Math, allowFMA]>;
+
+ // These have strange names so we don't perturb existing mir tests.
+ def _rnf64rr :
+ NVPTXInst<(outs Float64Regs:$dst),
+ (ins Float64Regs:$a, Float64Regs:$b),
+ !strconcat(OpcStr, ".rn.f64 \t$dst, $a, $b;"),
+ [(set Float64Regs:$dst, (OpNode Float64Regs:$a, Float64Regs:$b))]>,
+ Requires<[noFMA]>;
+ def _rnf64ri :
+ NVPTXInst<(outs Float64Regs:$dst),
+ (ins Float64Regs:$a, f64imm:$b),
+ !strconcat(OpcStr, ".rn.f64 \t$dst, $a, $b;"),
+ [(set Float64Regs:$dst, (OpNode Float64Regs:$a, fpimm:$b))]>,
+ Requires<[noFMA]>;
+ def _rnf32rr_ftz :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, Float32Regs:$b),
+ !strconcat(OpcStr, ".rn.ftz.f32 \t$dst, $a, $b;"),
+ [(set Float32Regs:$dst, (OpNode Float32Regs:$a, Float32Regs:$b))]>,
+ Requires<[noFMA, doF32FTZ]>;
+ def _rnf32ri_ftz :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, f32imm:$b),
+ !strconcat(OpcStr, ".rn.ftz.f32 \t$dst, $a, $b;"),
+ [(set Float32Regs:$dst, (OpNode Float32Regs:$a, fpimm:$b))]>,
+ Requires<[noFMA, doF32FTZ]>;
+ def _rnf32rr :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, Float32Regs:$b),
+ !strconcat(OpcStr, ".rn.f32 \t$dst, $a, $b;"),
+ [(set Float32Regs:$dst, (OpNode Float32Regs:$a, Float32Regs:$b))]>,
+ Requires<[noFMA]>;
+ def _rnf32ri :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, f32imm:$b),
+ !strconcat(OpcStr, ".rn.f32 \t$dst, $a, $b;"),
+ [(set Float32Regs:$dst, (OpNode Float32Regs:$a, fpimm:$b))]>,
+ Requires<[noFMA]>;
+ def _rnf16rr_ftz :
+ NVPTXInst<(outs Float16Regs:$dst),
+ (ins Float16Regs:$a, Float16Regs:$b),
+ !strconcat(OpcStr, ".rn.ftz.f16 \t$dst, $a, $b;"),
+ [(set Float16Regs:$dst, (OpNode Float16Regs:$a, Float16Regs:$b))]>,
+ Requires<[useFP16Math, noFMA, doF32FTZ]>;
+ def _rnf16rr :
+ NVPTXInst<(outs Float16Regs:$dst),
+ (ins Float16Regs:$a, Float16Regs:$b),
+ !strconcat(OpcStr, ".rn.f16 \t$dst, $a, $b;"),
+ [(set Float16Regs:$dst, (OpNode Float16Regs:$a, Float16Regs:$b))]>,
+ Requires<[useFP16Math, noFMA]>;
+ def _rnf16x2rr_ftz :
+ NVPTXInst<(outs Float16x2Regs:$dst),
+ (ins Float16x2Regs:$a, Float16x2Regs:$b),
+ !strconcat(OpcStr, ".rn.ftz.f16x2 \t$dst, $a, $b;"),
+ [(set Float16x2Regs:$dst, (OpNode Float16x2Regs:$a, Float16x2Regs:$b))]>,
+ Requires<[useFP16Math, noFMA, doF32FTZ]>;
+ def _rnf16x2rr :
+ NVPTXInst<(outs Float16x2Regs:$dst),
+ (ins Float16x2Regs:$a, Float16x2Regs:$b),
+ !strconcat(OpcStr, ".rn.f16x2 \t$dst, $a, $b;"),
+ [(set Float16x2Regs:$dst, (OpNode Float16x2Regs:$a, Float16x2Regs:$b))]>,
+ Requires<[useFP16Math, noFMA]>;
+}
+
+// Template for operations which take two f32 or f64 operands. Provides three
+// instructions: <OpcStr>.f64, <OpcStr>.f32, and <OpcStr>.ftz.f32 (flush
+// subnormal inputs and results to zero).
+multiclass F2<string OpcStr, SDNode OpNode> {
+ def f64 : NVPTXInst<(outs Float64Regs:$dst), (ins Float64Regs:$a),
+ !strconcat(OpcStr, ".f64 \t$dst, $a;"),
+ [(set Float64Regs:$dst, (OpNode Float64Regs:$a))]>;
+ def f32_ftz : NVPTXInst<(outs Float32Regs:$dst), (ins Float32Regs:$a),
+ !strconcat(OpcStr, ".ftz.f32 \t$dst, $a;"),
+ [(set Float32Regs:$dst, (OpNode Float32Regs:$a))]>,
+ Requires<[doF32FTZ]>;
+ def f32 : NVPTXInst<(outs Float32Regs:$dst), (ins Float32Regs:$a),
+ !strconcat(OpcStr, ".f32 \t$dst, $a;"),
+ [(set Float32Regs:$dst, (OpNode Float32Regs:$a))]>;
+}
+
+//===----------------------------------------------------------------------===//
+// NVPTX Instructions.
+//===----------------------------------------------------------------------===//
+
+//-----------------------------------
+// Type Conversion
+//-----------------------------------
+
+let hasSideEffects = 0 in {
+ // Generate a cvt to the given type from all possible types. Each instance
+ // takes a CvtMode immediate that defines the conversion mode to use. It can
+ // be CvtNONE to omit a conversion mode.
+ multiclass CVT_FROM_ALL<string FromName, RegisterClass RC> {
+ def _s8 :
+ NVPTXInst<(outs RC:$dst),
+ (ins Int16Regs:$src, CvtMode:$mode),
+ !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
+ FromName, ".s8 \t$dst, $src;"), []>;
+ def _u8 :
+ NVPTXInst<(outs RC:$dst),
+ (ins Int16Regs:$src, CvtMode:$mode),
+ !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
+ FromName, ".u8 \t$dst, $src;"), []>;
+ def _s16 :
+ NVPTXInst<(outs RC:$dst),
+ (ins Int16Regs:$src, CvtMode:$mode),
+ !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
+ FromName, ".s16 \t$dst, $src;"), []>;
+ def _u16 :
+ NVPTXInst<(outs RC:$dst),
+ (ins Int16Regs:$src, CvtMode:$mode),
+ !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
+ FromName, ".u16 \t$dst, $src;"), []>;
+ def _s32 :
+ NVPTXInst<(outs RC:$dst),
+ (ins Int32Regs:$src, CvtMode:$mode),
+ !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
+ FromName, ".s32 \t$dst, $src;"), []>;
+ def _u32 :
+ NVPTXInst<(outs RC:$dst),
+ (ins Int32Regs:$src, CvtMode:$mode),
+ !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
+ FromName, ".u32 \t$dst, $src;"), []>;
+ def _s64 :
+ NVPTXInst<(outs RC:$dst),
+ (ins Int64Regs:$src, CvtMode:$mode),
+ !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
+ FromName, ".s64 \t$dst, $src;"), []>;
+ def _u64 :
+ NVPTXInst<(outs RC:$dst),
+ (ins Int64Regs:$src, CvtMode:$mode),
+ !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
+ FromName, ".u64 \t$dst, $src;"), []>;
+ def _f16 :
+ NVPTXInst<(outs RC:$dst),
+ (ins Float16Regs:$src, CvtMode:$mode),
+ !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
+ FromName, ".f16 \t$dst, $src;"), []>;
+ def _f32 :
+ NVPTXInst<(outs RC:$dst),
+ (ins Float32Regs:$src, CvtMode:$mode),
+ !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
+ FromName, ".f32 \t$dst, $src;"), []>;
+ def _f64 :
+ NVPTXInst<(outs RC:$dst),
+ (ins Float64Regs:$src, CvtMode:$mode),
+ !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
+ FromName, ".f64 \t$dst, $src;"), []>;
+ }
+
+ // Generate cvts from all types to all types.
+ defm CVT_s8 : CVT_FROM_ALL<"s8", Int16Regs>;
+ defm CVT_u8 : CVT_FROM_ALL<"u8", Int16Regs>;
+ defm CVT_s16 : CVT_FROM_ALL<"s16", Int16Regs>;
+ defm CVT_u16 : CVT_FROM_ALL<"u16", Int16Regs>;
+ defm CVT_s32 : CVT_FROM_ALL<"s32", Int32Regs>;
+ defm CVT_u32 : CVT_FROM_ALL<"u32", Int32Regs>;
+ defm CVT_s64 : CVT_FROM_ALL<"s64", Int64Regs>;
+ defm CVT_u64 : CVT_FROM_ALL<"u64", Int64Regs>;
+ defm CVT_f16 : CVT_FROM_ALL<"f16", Float16Regs>;
+ defm CVT_f32 : CVT_FROM_ALL<"f32", Float32Regs>;
+ defm CVT_f64 : CVT_FROM_ALL<"f64", Float64Regs>;
+
+ // These cvts are different from those above: The source and dest registers
+ // are of the same type.
+ def CVT_INREG_s16_s8 : NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$src),
+ "cvt.s16.s8 \t$dst, $src;", []>;
+ def CVT_INREG_s32_s8 : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src),
+ "cvt.s32.s8 \t$dst, $src;", []>;
+ def CVT_INREG_s32_s16 : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src),
+ "cvt.s32.s16 \t$dst, $src;", []>;
+ def CVT_INREG_s64_s8 : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src),
+ "cvt.s64.s8 \t$dst, $src;", []>;
+ def CVT_INREG_s64_s16 : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src),
+ "cvt.s64.s16 \t$dst, $src;", []>;
+ def CVT_INREG_s64_s32 : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src),
+ "cvt.s64.s32 \t$dst, $src;", []>;
+}
+
+//-----------------------------------
+// Integer Arithmetic
+//-----------------------------------
+
+// Template for xor masquerading as int1 arithmetic.
+multiclass ADD_SUB_i1<SDNode OpNode> {
+ def _rr: NVPTXInst<(outs Int1Regs:$dst), (ins Int1Regs:$a, Int1Regs:$b),
+ "xor.pred \t$dst, $a, $b;",
+ [(set Int1Regs:$dst, (OpNode Int1Regs:$a, Int1Regs:$b))]>;
+ def _ri: NVPTXInst<(outs Int1Regs:$dst), (ins Int1Regs:$a, i1imm:$b),
+ "xor.pred \t$dst, $a, $b;",
+ [(set Int1Regs:$dst, (OpNode Int1Regs:$a, (imm):$b))]>;
+}
+
+// int1 addition and subtraction are both just xor.
+defm ADD_i1 : ADD_SUB_i1<add>;
+defm SUB_i1 : ADD_SUB_i1<sub>;
+
+// int16, int32, and int64 signed addition. Since nvptx is 2's complement, we
+// also use these for unsigned arithmetic.
+defm ADD : I3<"add.s", add>;
+defm SUB : I3<"sub.s", sub>;
+
+// int32 addition and subtraction with carry-out.
+// FIXME: PTX 4.3 adds a 64-bit add.cc (and maybe also 64-bit addc.cc?).
+defm ADDCC : ADD_SUB_INT_32<"add.cc", addc>;
+defm SUBCC : ADD_SUB_INT_32<"sub.cc", subc>;
+
+// int32 addition and subtraction with carry-in and carry-out.
+defm ADDCCC : ADD_SUB_INT_32<"addc.cc", adde>;
+defm SUBCCC : ADD_SUB_INT_32<"subc.cc", sube>;
+
+defm MULT : I3<"mul.lo.s", mul>;
+
+defm MULTHS : I3<"mul.hi.s", mulhs>;
+defm MULTHU : I3<"mul.hi.u", mulhu>;
+
+defm SDIV : I3<"div.s", sdiv>;
+defm UDIV : I3<"div.u", udiv>;
+
+// The ri versions of rem.s and rem.u won't be selected; DAGCombiner::visitSREM
+// will lower it.
+defm SREM : I3<"rem.s", srem>;
+defm UREM : I3<"rem.u", urem>;
+
+// Integer absolute value. NumBits should be one minus the bit width of RC.
+// This idiom implements the algorithm at
+// http://graphics.stanford.edu/~seander/bithacks.html#IntegerAbs.
+multiclass ABS<RegisterClass RC, string SizeName> {
+ def : NVPTXInst<(outs RC:$dst), (ins RC:$a),
+ !strconcat("abs", SizeName, " \t$dst, $a;"),
+ [(set RC:$dst, (abs RC:$a))]>;
+}
+defm ABS_16 : ABS<Int16Regs, ".s16">;
+defm ABS_32 : ABS<Int32Regs, ".s32">;
+defm ABS_64 : ABS<Int64Regs, ".s64">;
+
+// Integer min/max.
+defm SMAX : I3<"max.s", smax>;
+defm UMAX : I3<"max.u", umax>;
+defm SMIN : I3<"min.s", smin>;
+defm UMIN : I3<"min.u", umin>;
+
+//
+// Wide multiplication: NN-bit x NN-bit -> 2*NN-bit product.
+//
+// These instructions carry no selection patterns ([]); they are produced by
+// the mul_wide matchers and the shl/mul conversion patterns further below.
+//
+// 32 x 32 -> 64, signed. The Imm64 variant takes an i64 immediate operand;
+// the patterns that use it only match immediates that fit in 32 bits
+// (see SInt32Const below).
+def MULWIDES64 :
+ NVPTXInst<(outs Int64Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),
+ "mul.wide.s32 \t$dst, $a, $b;", []>;
+def MULWIDES64Imm :
+ NVPTXInst<(outs Int64Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
+ "mul.wide.s32 \t$dst, $a, $b;", []>;
+def MULWIDES64Imm64 :
+ NVPTXInst<(outs Int64Regs:$dst), (ins Int32Regs:$a, i64imm:$b),
+ "mul.wide.s32 \t$dst, $a, $b;", []>;
+
+// 32 x 32 -> 64, unsigned (Imm64 immediates are limited by UInt32Const).
+def MULWIDEU64 :
+ NVPTXInst<(outs Int64Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),
+ "mul.wide.u32 \t$dst, $a, $b;", []>;
+def MULWIDEU64Imm :
+ NVPTXInst<(outs Int64Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
+ "mul.wide.u32 \t$dst, $a, $b;", []>;
+def MULWIDEU64Imm64 :
+ NVPTXInst<(outs Int64Regs:$dst), (ins Int32Regs:$a, i64imm:$b),
+ "mul.wide.u32 \t$dst, $a, $b;", []>;
+
+// 16 x 16 -> 32, signed.
+def MULWIDES32 :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int16Regs:$a, Int16Regs:$b),
+ "mul.wide.s16 \t$dst, $a, $b;", []>;
+def MULWIDES32Imm :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int16Regs:$a, i16imm:$b),
+ "mul.wide.s16 \t$dst, $a, $b;", []>;
+def MULWIDES32Imm32 :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int16Regs:$a, i32imm:$b),
+ "mul.wide.s16 \t$dst, $a, $b;", []>;
+
+// 16 x 16 -> 32, unsigned.
+def MULWIDEU32 :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int16Regs:$a, Int16Regs:$b),
+ "mul.wide.u16 \t$dst, $a, $b;", []>;
+def MULWIDEU32Imm :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int16Regs:$a, i16imm:$b),
+ "mul.wide.u16 \t$dst, $a, $b;", []>;
+def MULWIDEU32Imm32 :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int16Regs:$a, i32imm:$b),
+ "mul.wide.u16 \t$dst, $a, $b;", []>;
+
+// Profile for the mul.wide ISD nodes: one result, two identically-typed
+// operands (the result type is wider than the operands, so it is left
+// unconstrained here).
+def SDTMulWide : SDTypeProfile<1, 2, [SDTCisSameAs<1, 2>]>;
+def mul_wide_signed : SDNode<"NVPTXISD::MUL_WIDE_SIGNED", SDTMulWide>;
+def mul_wide_unsigned : SDNode<"NVPTXISD::MUL_WIDE_UNSIGNED", SDTMulWide>;
+
+// Matchers for signed, unsigned mul.wide ISD nodes.
+// All are gated on the doMulWide predicate (mul.wide selection enabled).
+//
+// 16 x 16 -> 32.
+def : Pat<(i32 (mul_wide_signed Int16Regs:$a, Int16Regs:$b)),
+ (MULWIDES32 Int16Regs:$a, Int16Regs:$b)>,
+ Requires<[doMulWide]>;
+def : Pat<(i32 (mul_wide_signed Int16Regs:$a, imm:$b)),
+ (MULWIDES32Imm Int16Regs:$a, imm:$b)>,
+ Requires<[doMulWide]>;
+def : Pat<(i32 (mul_wide_unsigned Int16Regs:$a, Int16Regs:$b)),
+ (MULWIDEU32 Int16Regs:$a, Int16Regs:$b)>,
+ Requires<[doMulWide]>;
+def : Pat<(i32 (mul_wide_unsigned Int16Regs:$a, imm:$b)),
+ (MULWIDEU32Imm Int16Regs:$a, imm:$b)>,
+ Requires<[doMulWide]>;
+
+// 32 x 32 -> 64.
+def : Pat<(i64 (mul_wide_signed Int32Regs:$a, Int32Regs:$b)),
+ (MULWIDES64 Int32Regs:$a, Int32Regs:$b)>,
+ Requires<[doMulWide]>;
+def : Pat<(i64 (mul_wide_signed Int32Regs:$a, imm:$b)),
+ (MULWIDES64Imm Int32Regs:$a, imm:$b)>,
+ Requires<[doMulWide]>;
+def : Pat<(i64 (mul_wide_unsigned Int32Regs:$a, Int32Regs:$b)),
+ (MULWIDEU64 Int32Regs:$a, Int32Regs:$b)>,
+ Requires<[doMulWide]>;
+def : Pat<(i64 (mul_wide_unsigned Int32Regs:$a, imm:$b)),
+ (MULWIDEU64Imm Int32Regs:$a, imm:$b)>,
+ Requires<[doMulWide]>;
+
+// Predicates used for converting some patterns to mul.wide.
+// Each PatLeaf accepts an immediate only if its value fits in the given
+// signed/unsigned bit width, so a narrower mul.wide can be used safely.
+def SInt32Const : PatLeaf<(imm), [{
+ const APInt &v = N->getAPIntValue();
+ return v.isSignedIntN(32);
+}]>;
+
+def UInt32Const : PatLeaf<(imm), [{
+ const APInt &v = N->getAPIntValue();
+ return v.isIntN(32);
+}]>;
+
+def SInt16Const : PatLeaf<(imm), [{
+ const APInt &v = N->getAPIntValue();
+ return v.isSignedIntN(16);
+}]>;
+
+def UInt16Const : PatLeaf<(imm), [{
+ const APInt &v = N->getAPIntValue();
+ return v.isIntN(16);
+}]>;
+
+def Int5Const : PatLeaf<(imm), [{
+ // Check if 0 <= v < 32; only then will the result of (x << v) be an int32.
+ const APInt &v = N->getAPIntValue();
+ return v.sge(0) && v.slt(32);
+}]>;
+
+def Int4Const : PatLeaf<(imm), [{
+ // Check if 0 <= v < 16; only then will the result of (x << v) be an int16.
+ const APInt &v = N->getAPIntValue();
+ return v.sge(0) && v.slt(16);
+}]>;
+
+// Rewrite a shift amount v into the equivalent multiplier (1 << v), as an
+// i32 target constant, so that "x << v" can be selected as mul.wide.
+def SHL2MUL32 : SDNodeXForm<imm, [{
+ const APInt &v = N->getAPIntValue();
+ APInt temp(32, 1);
+ return CurDAG->getTargetConstant(temp.shl(v), SDLoc(N), MVT::i32);
+}]>;
+
+// Same as SHL2MUL32, but produces an i16 multiplier for 16-bit shifts.
+def SHL2MUL16 : SDNodeXForm<imm, [{
+ const APInt &v = N->getAPIntValue();
+ APInt temp(16, 1);
+ return CurDAG->getTargetConstant(temp.shl(v), SDLoc(N), MVT::i16);
+}]>;
+
+// Convert "sign/zero-extend, then shift left by an immediate" to mul.wide.
+// The Int5Const/Int4Const leaves guarantee the shift amount is in range, and
+// SHL2MUL32/SHL2MUL16 turn the shift amount into the equivalent multiplier.
+def : Pat<(shl (sext Int32Regs:$a), (i32 Int5Const:$b)),
+ (MULWIDES64Imm Int32Regs:$a, (SHL2MUL32 node:$b))>,
+ Requires<[doMulWide]>;
+def : Pat<(shl (zext Int32Regs:$a), (i32 Int5Const:$b)),
+ (MULWIDEU64Imm Int32Regs:$a, (SHL2MUL32 node:$b))>,
+ Requires<[doMulWide]>;
+
+def : Pat<(shl (sext Int16Regs:$a), (i16 Int4Const:$b)),
+ (MULWIDES32Imm Int16Regs:$a, (SHL2MUL16 node:$b))>,
+ Requires<[doMulWide]>;
+def : Pat<(shl (zext Int16Regs:$a), (i16 Int4Const:$b)),
+ (MULWIDEU32Imm Int16Regs:$a, (SHL2MUL16 node:$b))>,
+ Requires<[doMulWide]>;
+
+// Convert "sign/zero-extend then multiply" to mul.wide.
+// Immediate operands are accepted only when SInt32Const/UInt32Const (or the
+// 16-bit leaves) prove the constant fits in the narrow source width.
+def : Pat<(mul (sext Int32Regs:$a), (sext Int32Regs:$b)),
+ (MULWIDES64 Int32Regs:$a, Int32Regs:$b)>,
+ Requires<[doMulWide]>;
+def : Pat<(mul (sext Int32Regs:$a), (i64 SInt32Const:$b)),
+ (MULWIDES64Imm64 Int32Regs:$a, (i64 SInt32Const:$b))>,
+ Requires<[doMulWide]>;
+
+def : Pat<(mul (zext Int32Regs:$a), (zext Int32Regs:$b)),
+ (MULWIDEU64 Int32Regs:$a, Int32Regs:$b)>,
+ Requires<[doMulWide]>;
+def : Pat<(mul (zext Int32Regs:$a), (i64 UInt32Const:$b)),
+ (MULWIDEU64Imm64 Int32Regs:$a, (i64 UInt32Const:$b))>,
+ Requires<[doMulWide]>;
+
+def : Pat<(mul (sext Int16Regs:$a), (sext Int16Regs:$b)),
+ (MULWIDES32 Int16Regs:$a, Int16Regs:$b)>,
+ Requires<[doMulWide]>;
+def : Pat<(mul (sext Int16Regs:$a), (i32 SInt16Const:$b)),
+ (MULWIDES32Imm32 Int16Regs:$a, (i32 SInt16Const:$b))>,
+ Requires<[doMulWide]>;
+
+def : Pat<(mul (zext Int16Regs:$a), (zext Int16Regs:$b)),
+ (MULWIDEU32 Int16Regs:$a, Int16Regs:$b)>,
+ Requires<[doMulWide]>;
+def : Pat<(mul (zext Int16Regs:$a), (i32 UInt16Const:$b)),
+ (MULWIDEU32Imm32 Int16Regs:$a, (i32 UInt16Const:$b))>,
+ Requires<[doMulWide]>;
+
+//
+// Integer multiply-add: d = lo(a * b) + c, matching the NVPTXISD::IMAD node.
+// The r/i suffixes encode which of $b/$c are registers vs immediates.
+//
+// IMAD profile: result and all three operands are the same integer type.
+def SDTIMAD :
+ SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, SDTCisInt<0>, SDTCisInt<2>,
+ SDTCisSameAs<0, 2>, SDTCisSameAs<0, 3>]>;
+def imad : SDNode<"NVPTXISD::IMAD", SDTIMAD>;
+
+// 16-bit mad.lo.
+def MAD16rrr :
+ NVPTXInst<(outs Int16Regs:$dst),
+ (ins Int16Regs:$a, Int16Regs:$b, Int16Regs:$c),
+ "mad.lo.s16 \t$dst, $a, $b, $c;",
+ [(set Int16Regs:$dst, (imad Int16Regs:$a, Int16Regs:$b, Int16Regs:$c))]>;
+def MAD16rri :
+ NVPTXInst<(outs Int16Regs:$dst),
+ (ins Int16Regs:$a, Int16Regs:$b, i16imm:$c),
+ "mad.lo.s16 \t$dst, $a, $b, $c;",
+ [(set Int16Regs:$dst, (imad Int16Regs:$a, Int16Regs:$b, imm:$c))]>;
+def MAD16rir :
+ NVPTXInst<(outs Int16Regs:$dst),
+ (ins Int16Regs:$a, i16imm:$b, Int16Regs:$c),
+ "mad.lo.s16 \t$dst, $a, $b, $c;",
+ [(set Int16Regs:$dst, (imad Int16Regs:$a, imm:$b, Int16Regs:$c))]>;
+def MAD16rii :
+ NVPTXInst<(outs Int16Regs:$dst),
+ (ins Int16Regs:$a, i16imm:$b, i16imm:$c),
+ "mad.lo.s16 \t$dst, $a, $b, $c;",
+ [(set Int16Regs:$dst, (imad Int16Regs:$a, imm:$b, imm:$c))]>;
+
+// 32-bit mad.lo.
+def MAD32rrr :
+ NVPTXInst<(outs Int32Regs:$dst),
+ (ins Int32Regs:$a, Int32Regs:$b, Int32Regs:$c),
+ "mad.lo.s32 \t$dst, $a, $b, $c;",
+ [(set Int32Regs:$dst, (imad Int32Regs:$a, Int32Regs:$b, Int32Regs:$c))]>;
+def MAD32rri :
+ NVPTXInst<(outs Int32Regs:$dst),
+ (ins Int32Regs:$a, Int32Regs:$b, i32imm:$c),
+ "mad.lo.s32 \t$dst, $a, $b, $c;",
+ [(set Int32Regs:$dst, (imad Int32Regs:$a, Int32Regs:$b, imm:$c))]>;
+def MAD32rir :
+ NVPTXInst<(outs Int32Regs:$dst),
+ (ins Int32Regs:$a, i32imm:$b, Int32Regs:$c),
+ "mad.lo.s32 \t$dst, $a, $b, $c;",
+ [(set Int32Regs:$dst, (imad Int32Regs:$a, imm:$b, Int32Regs:$c))]>;
+def MAD32rii :
+ NVPTXInst<(outs Int32Regs:$dst),
+ (ins Int32Regs:$a, i32imm:$b, i32imm:$c),
+ "mad.lo.s32 \t$dst, $a, $b, $c;",
+ [(set Int32Regs:$dst, (imad Int32Regs:$a, imm:$b, imm:$c))]>;
+
+// 64-bit mad.lo.
+def MAD64rrr :
+ NVPTXInst<(outs Int64Regs:$dst),
+ (ins Int64Regs:$a, Int64Regs:$b, Int64Regs:$c),
+ "mad.lo.s64 \t$dst, $a, $b, $c;",
+ [(set Int64Regs:$dst, (imad Int64Regs:$a, Int64Regs:$b, Int64Regs:$c))]>;
+def MAD64rri :
+ NVPTXInst<(outs Int64Regs:$dst),
+ (ins Int64Regs:$a, Int64Regs:$b, i64imm:$c),
+ "mad.lo.s64 \t$dst, $a, $b, $c;",
+ [(set Int64Regs:$dst, (imad Int64Regs:$a, Int64Regs:$b, imm:$c))]>;
+def MAD64rir :
+ NVPTXInst<(outs Int64Regs:$dst),
+ (ins Int64Regs:$a, i64imm:$b, Int64Regs:$c),
+ "mad.lo.s64 \t$dst, $a, $b, $c;",
+ [(set Int64Regs:$dst, (imad Int64Regs:$a, imm:$b, Int64Regs:$c))]>;
+def MAD64rii :
+ NVPTXInst<(outs Int64Regs:$dst),
+ (ins Int64Regs:$a, i64imm:$b, i64imm:$c),
+ "mad.lo.s64 \t$dst, $a, $b, $c;",
+ [(set Int64Regs:$dst, (imad Int64Regs:$a, imm:$b, imm:$c))]>;
+
+// Integer negation (neg.sNN), selected from the ineg node.
+def INEG16 :
+ NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$src),
+ "neg.s16 \t$dst, $src;",
+ [(set Int16Regs:$dst, (ineg Int16Regs:$src))]>;
+def INEG32 :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src),
+ "neg.s32 \t$dst, $src;",
+ [(set Int32Regs:$dst, (ineg Int32Regs:$src))]>;
+def INEG64 :
+ NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src),
+ "neg.s64 \t$dst, $src;",
+ [(set Int64Regs:$dst, (ineg Int64Regs:$src))]>;
+
+//-----------------------------------
+// Floating Point Arithmetic
+//-----------------------------------
+
+// Constant 1.0f. Matches only single-precision immediates whose value is
+// exactly 1.0f; used below to turn 1.0/x divisions into reciprocals.
+def FloatConst1 : PatLeaf<(fpimm), [{
+ return &N->getValueAPF().getSemantics() == &llvm::APFloat::IEEEsingle() &&
+ N->getValueAPF().convertToFloat() == 1.0f;
+}]>;
+// Constant 1.0 (double), same role for f64.
+def DoubleConst1 : PatLeaf<(fpimm), [{
+ return &N->getValueAPF().getSemantics() == &llvm::APFloat::IEEEdouble() &&
+ N->getValueAPF().convertToDouble() == 1.0;
+}]>;
+
+// Loads FP16 constant into a register.
+//
+// ptxas does not have hex representation for fp16, so we can't use
+// fp16 immediate values in .f16 instructions. Instead we have to load
+// the constant into a register using mov.b16.
+def LOAD_CONST_F16 :
+ NVPTXInst<(outs Float16Regs:$dst), (ins f16imm:$a),
+ "mov.b16 \t$dst, $a;", []>;
+
+// Basic FP add/sub/mul (with fma-contraction handling in the multiclass).
+defm FADD : F3_fma_component<"add", fadd>;
+defm FSUB : F3_fma_component<"sub", fsub>;
+defm FMUL : F3_fma_component<"mul", fmul>;
+
+// FP min/max, selected from the fminnum/fmaxnum nodes.
+defm FMIN : F3<"min", fminnum>;
+defm FMAX : F3<"max", fmaxnum>;
+
+// FP unary ops: absolute value, negation, round-to-nearest square root.
+defm FABS : F2<"abs", fabs>;
+defm FNEG : F2<"neg", fneg>;
+defm FSQRT : F2<"sqrt.rn", fsqrt>;
+
+//
+// F64 division
+//
+// 1.0 / b is selected as a full-precision reciprocal (rcp.rn.f64); the
+// DoubleConst1 leaf guarantees $a is exactly 1.0, so $a is not emitted.
+def FDIV641r :
+ NVPTXInst<(outs Float64Regs:$dst),
+ (ins f64imm:$a, Float64Regs:$b),
+ "rcp.rn.f64 \t$dst, $b;",
+ [(set Float64Regs:$dst, (fdiv DoubleConst1:$a, Float64Regs:$b))]>;
+def FDIV64rr :
+ NVPTXInst<(outs Float64Regs:$dst),
+ (ins Float64Regs:$a, Float64Regs:$b),
+ "div.rn.f64 \t$dst, $a, $b;",
+ [(set Float64Regs:$dst, (fdiv Float64Regs:$a, Float64Regs:$b))]>;
+def FDIV64ri :
+ NVPTXInst<(outs Float64Regs:$dst),
+ (ins Float64Regs:$a, f64imm:$b),
+ "div.rn.f64 \t$dst, $a, $b;",
+ [(set Float64Regs:$dst, (fdiv Float64Regs:$a, fpimm:$b))]>;
+
+//
+// F32 Approximate reciprocal (1.0f / b), selected when do_DIVF32_APPROX is
+// in effect; the _ftz variants additionally require flush-to-zero mode.
+//
+def FDIV321r_ftz :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins f32imm:$a, Float32Regs:$b),
+ "rcp.approx.ftz.f32 \t$dst, $b;",
+ [(set Float32Regs:$dst, (fdiv FloatConst1:$a, Float32Regs:$b))]>,
+ Requires<[do_DIVF32_APPROX, doF32FTZ]>;
+def FDIV321r :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins f32imm:$a, Float32Regs:$b),
+ "rcp.approx.f32 \t$dst, $b;",
+ [(set Float32Regs:$dst, (fdiv FloatConst1:$a, Float32Regs:$b))]>,
+ Requires<[do_DIVF32_APPROX]>;
+//
+// F32 Approximate division
+//
+def FDIV32approxrr_ftz :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, Float32Regs:$b),
+ "div.approx.ftz.f32 \t$dst, $a, $b;",
+ [(set Float32Regs:$dst, (fdiv Float32Regs:$a, Float32Regs:$b))]>,
+ Requires<[do_DIVF32_APPROX, doF32FTZ]>;
+def FDIV32approxri_ftz :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, f32imm:$b),
+ "div.approx.ftz.f32 \t$dst, $a, $b;",
+ [(set Float32Regs:$dst, (fdiv Float32Regs:$a, fpimm:$b))]>,
+ Requires<[do_DIVF32_APPROX, doF32FTZ]>;
+def FDIV32approxrr :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, Float32Regs:$b),
+ "div.approx.f32 \t$dst, $a, $b;",
+ [(set Float32Regs:$dst, (fdiv Float32Regs:$a, Float32Regs:$b))]>,
+ Requires<[do_DIVF32_APPROX]>;
+def FDIV32approxri :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, f32imm:$b),
+ "div.approx.f32 \t$dst, $a, $b;",
+ [(set Float32Regs:$dst, (fdiv Float32Regs:$a, fpimm:$b))]>,
+ Requires<[do_DIVF32_APPROX]>;
+//
+// F32 Semi-accurate reciprocal, selected under do_DIVF32_FULL.
+//
+// rcp.approx gives the same result as div.full(1.0f, a) and is faster.
+//
+def FDIV321r_approx_ftz :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins f32imm:$a, Float32Regs:$b),
+ "rcp.approx.ftz.f32 \t$dst, $b;",
+ [(set Float32Regs:$dst, (fdiv FloatConst1:$a, Float32Regs:$b))]>,
+ Requires<[do_DIVF32_FULL, doF32FTZ]>;
+def FDIV321r_approx :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins f32imm:$a, Float32Regs:$b),
+ "rcp.approx.f32 \t$dst, $b;",
+ [(set Float32Regs:$dst, (fdiv FloatConst1:$a, Float32Regs:$b))]>,
+ Requires<[do_DIVF32_FULL]>;
+//
+// F32 Semi-accurate division (div.full), selected under do_DIVF32_FULL.
+//
+def FDIV32rr_ftz :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, Float32Regs:$b),
+ "div.full.ftz.f32 \t$dst, $a, $b;",
+ [(set Float32Regs:$dst, (fdiv Float32Regs:$a, Float32Regs:$b))]>,
+ Requires<[do_DIVF32_FULL, doF32FTZ]>;
+def FDIV32ri_ftz :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, f32imm:$b),
+ "div.full.ftz.f32 \t$dst, $a, $b;",
+ [(set Float32Regs:$dst, (fdiv Float32Regs:$a, fpimm:$b))]>,
+ Requires<[do_DIVF32_FULL, doF32FTZ]>;
+def FDIV32rr :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, Float32Regs:$b),
+ "div.full.f32 \t$dst, $a, $b;",
+ [(set Float32Regs:$dst, (fdiv Float32Regs:$a, Float32Regs:$b))]>,
+ Requires<[do_DIVF32_FULL]>;
+def FDIV32ri :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, f32imm:$b),
+ "div.full.f32 \t$dst, $a, $b;",
+ [(set Float32Regs:$dst, (fdiv Float32Regs:$a, fpimm:$b))]>,
+ Requires<[do_DIVF32_FULL]>;
+//
+// F32 Accurate reciprocal (rcp.rn), the default IEEE-rounded path;
+// requires PTX ISA 2.0 (reqPTX20).
+//
+def FDIV321r_prec_ftz :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins f32imm:$a, Float32Regs:$b),
+ "rcp.rn.ftz.f32 \t$dst, $b;",
+ [(set Float32Regs:$dst, (fdiv FloatConst1:$a, Float32Regs:$b))]>,
+ Requires<[reqPTX20, doF32FTZ]>;
+def FDIV321r_prec :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins f32imm:$a, Float32Regs:$b),
+ "rcp.rn.f32 \t$dst, $b;",
+ [(set Float32Regs:$dst, (fdiv FloatConst1:$a, Float32Regs:$b))]>,
+ Requires<[reqPTX20]>;
+//
+// F32 Accurate division (div.rn); also used by the frem expansion below.
+//
+def FDIV32rr_prec_ftz :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, Float32Regs:$b),
+ "div.rn.ftz.f32 \t$dst, $a, $b;",
+ [(set Float32Regs:$dst, (fdiv Float32Regs:$a, Float32Regs:$b))]>,
+ Requires<[doF32FTZ, reqPTX20]>;
+def FDIV32ri_prec_ftz :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, f32imm:$b),
+ "div.rn.ftz.f32 \t$dst, $a, $b;",
+ [(set Float32Regs:$dst, (fdiv Float32Regs:$a, fpimm:$b))]>,
+ Requires<[doF32FTZ, reqPTX20]>;
+def FDIV32rr_prec :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, Float32Regs:$b),
+ "div.rn.f32 \t$dst, $a, $b;",
+ [(set Float32Regs:$dst, (fdiv Float32Regs:$a, Float32Regs:$b))]>,
+ Requires<[reqPTX20]>;
+def FDIV32ri_prec :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, f32imm:$b),
+ "div.rn.f32 \t$dst, $a, $b;",
+ [(set Float32Regs:$dst, (fdiv Float32Regs:$a, fpimm:$b))]>,
+ Requires<[reqPTX20]>;
+
+//
+// FMA: d = a * b + c, selected from the fma ISD node.
+//
+
+// Generic FMA template: rrr/rri/rir/rii variants for register/immediate
+// combinations of $b and $c; all gated on the given predicate.
+multiclass FMA<string OpcStr, RegisterClass RC, Operand ImmCls, Predicate Pred> {
+ def rrr : NVPTXInst<(outs RC:$dst), (ins RC:$a, RC:$b, RC:$c),
+ !strconcat(OpcStr, " \t$dst, $a, $b, $c;"),
+ [(set RC:$dst, (fma RC:$a, RC:$b, RC:$c))]>,
+ Requires<[Pred]>;
+ def rri : NVPTXInst<(outs RC:$dst),
+ (ins RC:$a, RC:$b, ImmCls:$c),
+ !strconcat(OpcStr, " \t$dst, $a, $b, $c;"),
+ [(set RC:$dst, (fma RC:$a, RC:$b, fpimm:$c))]>,
+ Requires<[Pred]>;
+ def rir : NVPTXInst<(outs RC:$dst),
+ (ins RC:$a, ImmCls:$b, RC:$c),
+ !strconcat(OpcStr, " \t$dst, $a, $b, $c;"),
+ [(set RC:$dst, (fma RC:$a, fpimm:$b, RC:$c))]>,
+ Requires<[Pred]>;
+ def rii : NVPTXInst<(outs RC:$dst),
+ (ins RC:$a, ImmCls:$b, ImmCls:$c),
+ !strconcat(OpcStr, " \t$dst, $a, $b, $c;"),
+ [(set RC:$dst, (fma RC:$a, fpimm:$b, fpimm:$c))]>,
+ Requires<[Pred]>;
+}
+
+// FP16 FMA: register operands only (fp16 immediates are not representable in
+// PTX source; see LOAD_CONST_F16 above). Also requires useFP16Math.
+multiclass FMA_F16<string OpcStr, RegisterClass RC, Predicate Pred> {
+ def rrr : NVPTXInst<(outs RC:$dst), (ins RC:$a, RC:$b, RC:$c),
+ !strconcat(OpcStr, " \t$dst, $a, $b, $c;"),
+ [(set RC:$dst, (fma RC:$a, RC:$b, RC:$c))]>,
+ Requires<[useFP16Math, Pred]>;
+}
+
+defm FMA16_ftz : FMA_F16<"fma.rn.ftz.f16", Float16Regs, doF32FTZ>;
+defm FMA16 : FMA_F16<"fma.rn.f16", Float16Regs, true>;
+defm FMA16x2_ftz : FMA_F16<"fma.rn.ftz.f16x2", Float16x2Regs, doF32FTZ>;
+defm FMA16x2 : FMA_F16<"fma.rn.f16x2", Float16x2Regs, true>;
+defm FMA32_ftz : FMA<"fma.rn.ftz.f32", Float32Regs, f32imm, doF32FTZ>;
+defm FMA32 : FMA<"fma.rn.f32", Float32Regs, f32imm, true>;
+defm FMA64 : FMA<"fma.rn.f64", Float64Regs, f64imm, true>;
+
+// sin/cos: approximate hardware instructions, so only usable when
+// unsafe FP math is allowed.
+def SINF: NVPTXInst<(outs Float32Regs:$dst), (ins Float32Regs:$src),
+ "sin.approx.f32 \t$dst, $src;",
+ [(set Float32Regs:$dst, (fsin Float32Regs:$src))]>,
+ Requires<[allowUnsafeFPMath]>;
+def COSF: NVPTXInst<(outs Float32Regs:$dst), (ins Float32Regs:$src),
+ "cos.approx.f32 \t$dst, $src;",
+ [(set Float32Regs:$dst, (fcos Float32Regs:$src))]>,
+ Requires<[allowUnsafeFPMath]>;
+
+// Lower (frem x, y) into (sub x, (mul (floor (div x, y)) y)),
+// i.e. "poor man's fmod()". The CVT with a CvtRMI (round-to-minus-infinity,
+// integer) modifier implements the floor step.
+
+// frem - f32 FTZ
+def : Pat<(frem Float32Regs:$x, Float32Regs:$y),
+ (FSUBf32rr_ftz Float32Regs:$x, (FMULf32rr_ftz (CVT_f32_f32
+ (FDIV32rr_prec_ftz Float32Regs:$x, Float32Regs:$y), CvtRMI_FTZ),
+ Float32Regs:$y))>,
+ Requires<[doF32FTZ]>;
+def : Pat<(frem Float32Regs:$x, fpimm:$y),
+ (FSUBf32rr_ftz Float32Regs:$x, (FMULf32ri_ftz (CVT_f32_f32
+ (FDIV32ri_prec_ftz Float32Regs:$x, fpimm:$y), CvtRMI_FTZ),
+ fpimm:$y))>,
+ Requires<[doF32FTZ]>;
+
+// frem - f32
+def : Pat<(frem Float32Regs:$x, Float32Regs:$y),
+ (FSUBf32rr Float32Regs:$x, (FMULf32rr (CVT_f32_f32
+ (FDIV32rr_prec Float32Regs:$x, Float32Regs:$y), CvtRMI),
+ Float32Regs:$y))>;
+def : Pat<(frem Float32Regs:$x, fpimm:$y),
+ (FSUBf32rr Float32Regs:$x, (FMULf32ri (CVT_f32_f32
+ (FDIV32ri_prec Float32Regs:$x, fpimm:$y), CvtRMI),
+ fpimm:$y))>;
+
+// frem - f64
+def : Pat<(frem Float64Regs:$x, Float64Regs:$y),
+ (FSUBf64rr Float64Regs:$x, (FMULf64rr (CVT_f64_f64
+ (FDIV64rr Float64Regs:$x, Float64Regs:$y), CvtRMI),
+ Float64Regs:$y))>;
+def : Pat<(frem Float64Regs:$x, fpimm:$y),
+ (FSUBf64rr Float64Regs:$x, (FMULf64ri (CVT_f64_f64
+ (FDIV64ri Float64Regs:$x, fpimm:$y), CvtRMI),
+ fpimm:$y))>;
+
+//-----------------------------------
+// Bitwise operations
+//-----------------------------------
+
+// Template for three-arg bitwise operations. Takes three args, Creates .b16,
+// .b32, .b64, and .pred (predicate registers -- i.e., i1) versions of OpcStr.
+// Each width gets rr (reg,reg) and ri (reg,imm) forms.
+multiclass BITWISE<string OpcStr, SDNode OpNode> {
+ def b1rr :
+ NVPTXInst<(outs Int1Regs:$dst), (ins Int1Regs:$a, Int1Regs:$b),
+ !strconcat(OpcStr, ".pred \t$dst, $a, $b;"),
+ [(set Int1Regs:$dst, (OpNode Int1Regs:$a, Int1Regs:$b))]>;
+ def b1ri :
+ NVPTXInst<(outs Int1Regs:$dst), (ins Int1Regs:$a, i1imm:$b),
+ !strconcat(OpcStr, ".pred \t$dst, $a, $b;"),
+ [(set Int1Regs:$dst, (OpNode Int1Regs:$a, imm:$b))]>;
+ def b16rr :
+ NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, Int16Regs:$b),
+ !strconcat(OpcStr, ".b16 \t$dst, $a, $b;"),
+ [(set Int16Regs:$dst, (OpNode Int16Regs:$a, Int16Regs:$b))]>;
+ def b16ri :
+ NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, i16imm:$b),
+ !strconcat(OpcStr, ".b16 \t$dst, $a, $b;"),
+ [(set Int16Regs:$dst, (OpNode Int16Regs:$a, imm:$b))]>;
+ def b32rr :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),
+ !strconcat(OpcStr, ".b32 \t$dst, $a, $b;"),
+ [(set Int32Regs:$dst, (OpNode Int32Regs:$a, Int32Regs:$b))]>;
+ def b32ri :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
+ !strconcat(OpcStr, ".b32 \t$dst, $a, $b;"),
+ [(set Int32Regs:$dst, (OpNode Int32Regs:$a, imm:$b))]>;
+ def b64rr :
+ NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, Int64Regs:$b),
+ !strconcat(OpcStr, ".b64 \t$dst, $a, $b;"),
+ [(set Int64Regs:$dst, (OpNode Int64Regs:$a, Int64Regs:$b))]>;
+ def b64ri :
+ NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, i64imm:$b),
+ !strconcat(OpcStr, ".b64 \t$dst, $a, $b;"),
+ [(set Int64Regs:$dst, (OpNode Int64Regs:$a, imm:$b))]>;
+}
+
+defm OR : BITWISE<"or", or>;
+defm AND : BITWISE<"and", and>;
+defm XOR : BITWISE<"xor", xor>;
+
+// Bitwise NOT, one instruction per width (plus the predicate form).
+def NOT1 : NVPTXInst<(outs Int1Regs:$dst), (ins Int1Regs:$src),
+ "not.pred \t$dst, $src;",
+ [(set Int1Regs:$dst, (not Int1Regs:$src))]>;
+def NOT16 : NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$src),
+ "not.b16 \t$dst, $src;",
+ [(set Int16Regs:$dst, (not Int16Regs:$src))]>;
+def NOT32 : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src),
+ "not.b32 \t$dst, $src;",
+ [(set Int32Regs:$dst, (not Int32Regs:$src))]>;
+def NOT64 : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src),
+ "not.b64 \t$dst, $src;",
+ [(set Int64Regs:$dst, (not Int64Regs:$src))]>;
+
+// Template for left/right shifts. Takes three operands,
+// [dest (reg), src (reg), shift (reg or imm)].
+// dest and src may be int64, int32, or int16, but shift is always int32.
+//
+// This template also defines a 32-bit shift (imm, imm) instruction.
+multiclass SHIFT<string OpcStr, SDNode OpNode> {
+ def i64rr :
+ NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, Int32Regs:$b),
+ !strconcat(OpcStr, "64 \t$dst, $a, $b;"),
+ [(set Int64Regs:$dst, (OpNode Int64Regs:$a, Int32Regs:$b))]>;
+ def i64ri :
+ NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, i32imm:$b),
+ !strconcat(OpcStr, "64 \t$dst, $a, $b;"),
+ [(set Int64Regs:$dst, (OpNode Int64Regs:$a, (i32 imm:$b)))]>;
+ def i32rr :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),
+ !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
+ [(set Int32Regs:$dst, (OpNode Int32Regs:$a, Int32Regs:$b))]>;
+ def i32ri :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
+ !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
+ [(set Int32Regs:$dst, (OpNode Int32Regs:$a, (i32 imm:$b)))]>;
+ def i32ii :
+ NVPTXInst<(outs Int32Regs:$dst), (ins i32imm:$a, i32imm:$b),
+ !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
+ [(set Int32Regs:$dst, (OpNode (i32 imm:$a), (i32 imm:$b)))]>;
+ def i16rr :
+ NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, Int32Regs:$b),
+ !strconcat(OpcStr, "16 \t$dst, $a, $b;"),
+ [(set Int16Regs:$dst, (OpNode Int16Regs:$a, Int32Regs:$b))]>;
+ def i16ri :
+ NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, i32imm:$b),
+ !strconcat(OpcStr, "16 \t$dst, $a, $b;"),
+ [(set Int16Regs:$dst, (OpNode Int16Regs:$a, (i32 imm:$b)))]>;
+}
+
+// Logical shift left, arithmetic shift right, logical shift right.
+defm SHL : SHIFT<"shl.b", shl>;
+defm SRA : SHIFT<"shr.s", sra>;
+defm SRL : SHIFT<"shr.u", srl>;
+
+// Bit-reverse
+def BREV32 :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a),
+ "brev.b32 \t$dst, $a;",
+ [(set Int32Regs:$dst, (bitreverse Int32Regs:$a))]>;
+def BREV64 :
+ NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a),
+ "brev.b64 \t$dst, $a;",
+ [(set Int64Regs:$dst, (bitreverse Int64Regs:$a))]>;
+
+//
+// Rotate: Use ptx shf instruction if available.
+// shf is a funnel shift; passing the same register for both sources turns it
+// into a rotate. Gated on hasHWROT32.
+//
+
+// 32 bit r2 = rotl r1, n
+// =>
+// r2 = shf.l r1, r1, n
+def ROTL32imm_hw :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src, i32imm:$amt),
+ "shf.l.wrap.b32 \t$dst, $src, $src, $amt;",
+ [(set Int32Regs:$dst, (rotl Int32Regs:$src, (i32 imm:$amt)))]>,
+ Requires<[hasHWROT32]>;
+
+def ROTL32reg_hw :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src, Int32Regs:$amt),
+ "shf.l.wrap.b32 \t$dst, $src, $src, $amt;",
+ [(set Int32Regs:$dst, (rotl Int32Regs:$src, Int32Regs:$amt))]>,
+ Requires<[hasHWROT32]>;
+
+// 32 bit r2 = rotr r1, n
+// =>
+// r2 = shf.r r1, r1, n
+def ROTR32imm_hw :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src, i32imm:$amt),
+ "shf.r.wrap.b32 \t$dst, $src, $src, $amt;",
+ [(set Int32Regs:$dst, (rotr Int32Regs:$src, (i32 imm:$amt)))]>,
+ Requires<[hasHWROT32]>;
+
+def ROTR32reg_hw :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src, Int32Regs:$amt),
+ "shf.r.wrap.b32 \t$dst, $src, $src, $amt;",
+ [(set Int32Regs:$dst, (rotr Int32Regs:$src, Int32Regs:$amt))]>,
+ Requires<[hasHWROT32]>;
+
+// 32-bit software rotate by immediate. $amt2 should equal 32 - $amt1.
+// Expands to shl + shr of the two disjoint halves; add.u32 combines them
+// (the halves share no set bits, so add acts as or).
+def ROT32imm_sw :
+ NVPTXInst<(outs Int32Regs:$dst),
+ (ins Int32Regs:$src, i32imm:$amt1, i32imm:$amt2),
+ "{{\n\t"
+ ".reg .b32 %lhs;\n\t"
+ ".reg .b32 %rhs;\n\t"
+ "shl.b32 \t%lhs, $src, $amt1;\n\t"
+ "shr.b32 \t%rhs, $src, $amt2;\n\t"
+ "add.u32 \t$dst, %lhs, %rhs;\n\t"
+ "}}",
+ []>;
+
+// Computes the complementary shift amount (32 - amt) for ROT32imm_sw.
+def SUB_FRM_32 : SDNodeXForm<imm, [{
+ return CurDAG->getTargetConstant(32 - N->getZExtValue(), SDLoc(N), MVT::i32);
+}]>;
+
+// Immediate rotates expand in software only when no hw rotate is available.
+def : Pat<(rotl Int32Regs:$src, (i32 imm:$amt)),
+ (ROT32imm_sw Int32Regs:$src, imm:$amt, (SUB_FRM_32 node:$amt))>,
+ Requires<[noHWROT32]>;
+def : Pat<(rotr Int32Regs:$src, (i32 imm:$amt)),
+ (ROT32imm_sw Int32Regs:$src, (SUB_FRM_32 node:$amt), imm:$amt)>,
+ Requires<[noHWROT32]>;
+
+// 32-bit software rotate left by register.
+def ROTL32reg_sw :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src, Int32Regs:$amt),
+ "{{\n\t"
+ ".reg .b32 %lhs;\n\t"
+ ".reg .b32 %rhs;\n\t"
+ ".reg .b32 %amt2;\n\t"
+ "shl.b32 \t%lhs, $src, $amt;\n\t"
+ "sub.s32 \t%amt2, 32, $amt;\n\t"
+ "shr.b32 \t%rhs, $src, %amt2;\n\t"
+ "add.u32 \t$dst, %lhs, %rhs;\n\t"
+ "}}",
+ [(set Int32Regs:$dst, (rotl Int32Regs:$src, Int32Regs:$amt))]>,
+ Requires<[noHWROT32]>;
+
+// 32-bit software rotate right by register.
+def ROTR32reg_sw :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src, Int32Regs:$amt),
+ "{{\n\t"
+ ".reg .b32 %lhs;\n\t"
+ ".reg .b32 %rhs;\n\t"
+ ".reg .b32 %amt2;\n\t"
+ "shr.b32 \t%lhs, $src, $amt;\n\t"
+ "sub.s32 \t%amt2, 32, $amt;\n\t"
+ "shl.b32 \t%rhs, $src, %amt2;\n\t"
+ "add.u32 \t$dst, %lhs, %rhs;\n\t"
+ "}}",
+ [(set Int32Regs:$dst, (rotr Int32Regs:$src, Int32Regs:$amt))]>,
+ Requires<[noHWROT32]>;
+
+// 64-bit software rotate by immediate. $amt2 should equal 64 - $amt1.
+def ROT64imm_sw :
+ NVPTXInst<(outs Int64Regs:$dst),
+ (ins Int64Regs:$src, i32imm:$amt1, i32imm:$amt2),
+ "{{\n\t"
+ ".reg .b64 %lhs;\n\t"
+ ".reg .b64 %rhs;\n\t"
+ "shl.b64 \t%lhs, $src, $amt1;\n\t"
+ "shr.b64 \t%rhs, $src, $amt2;\n\t"
+ "add.u64 \t$dst, %lhs, %rhs;\n\t"
+ "}}",
+ []>;
+
+// Computes the complementary shift amount (64 - amt) for ROT64imm_sw.
+def SUB_FRM_64 : SDNodeXForm<imm, [{
+ return CurDAG->getTargetConstant(64-N->getZExtValue(), SDLoc(N), MVT::i32);
+}]>;
+
+// Unlike the 32-bit case these patterns are unconditional: no hardware
+// 64-bit rotate instruction is defined here.
+def : Pat<(rotl Int64Regs:$src, (i32 imm:$amt)),
+ (ROT64imm_sw Int64Regs:$src, imm:$amt, (SUB_FRM_64 node:$amt))>;
+def : Pat<(rotr Int64Regs:$src, (i32 imm:$amt)),
+ (ROT64imm_sw Int64Regs:$src, (SUB_FRM_64 node:$amt), imm:$amt)>;
+
+// 64-bit software rotate left by register.
+def ROTL64reg_sw :
+ NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src, Int32Regs:$amt),
+ "{{\n\t"
+ ".reg .b64 %lhs;\n\t"
+ ".reg .b64 %rhs;\n\t"
+ ".reg .u32 %amt2;\n\t"
+ "shl.b64 \t%lhs, $src, $amt;\n\t"
+ "sub.u32 \t%amt2, 64, $amt;\n\t"
+ "shr.b64 \t%rhs, $src, %amt2;\n\t"
+ "add.u64 \t$dst, %lhs, %rhs;\n\t"
+ "}}",
+ [(set Int64Regs:$dst, (rotl Int64Regs:$src, Int32Regs:$amt))]>;
+
+// 64-bit software rotate right by register.
+def ROTR64reg_sw :
+ NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src, Int32Regs:$amt),
+ "{{\n\t"
+ ".reg .b64 %lhs;\n\t"
+ ".reg .b64 %rhs;\n\t"
+ ".reg .u32 %amt2;\n\t"
+ "shr.b64 \t%lhs, $src, $amt;\n\t"
+ "sub.u32 \t%amt2, 64, $amt;\n\t"
+ "shl.b64 \t%rhs, $src, %amt2;\n\t"
+ "add.u64 \t$dst, %lhs, %rhs;\n\t"
+ "}}",
+ [(set Int64Regs:$dst, (rotr Int64Regs:$src, Int32Regs:$amt))]>;
+
+//
+// Funnel shift in clamp mode
+//
+
+// Create SDNodes so they can be used in the DAG code, e.g.
+// NVPTXISelLowering (LowerShiftLeftParts and LowerShiftRightParts)
+def SDTIntShiftDOp :
+ SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>,
+ SDTCisInt<0>, SDTCisInt<3>]>;
+def FUN_SHFL_CLAMP : SDNode<"NVPTXISD::FUN_SHFL_CLAMP", SDTIntShiftDOp, []>;
+def FUN_SHFR_CLAMP : SDNode<"NVPTXISD::FUN_SHFR_CLAMP", SDTIntShiftDOp, []>;
+
+// shf.{l,r}.clamp.b32: shift the 64-bit value {hi, lo} and return one
+// 32-bit half; .clamp limits the shift amount instead of wrapping.
+def FUNSHFLCLAMP :
+ NVPTXInst<(outs Int32Regs:$dst),
+ (ins Int32Regs:$lo, Int32Regs:$hi, Int32Regs:$amt),
+ "shf.l.clamp.b32 \t$dst, $lo, $hi, $amt;",
+ [(set Int32Regs:$dst,
+ (FUN_SHFL_CLAMP Int32Regs:$lo, Int32Regs:$hi, Int32Regs:$amt))]>;
+
+def FUNSHFRCLAMP :
+ NVPTXInst<(outs Int32Regs:$dst),
+ (ins Int32Regs:$lo, Int32Regs:$hi, Int32Regs:$amt),
+ "shf.r.clamp.b32 \t$dst, $lo, $hi, $amt;",
+ [(set Int32Regs:$dst,
+ (FUN_SHFR_CLAMP Int32Regs:$lo, Int32Regs:$hi, Int32Regs:$amt))]>;
+
+//
+// BFE - bit-field extract
+//
+
+// Template for BFE instructions. Takes four args,
+// [dest (reg), src (reg), start (reg or imm), end (reg or imm)].
+// Start may be an imm only if end is also an imm. FIXME: Is this a
+// restriction in PTX?
+//
+// dest and src may be int32 or int64, but start and end are always int32.
+// No selection patterns ([]); these are used from C++ instruction selection.
+multiclass BFE<string TyStr, RegisterClass RC> {
+ def rrr
+ : NVPTXInst<(outs RC:$d),
+ (ins RC:$a, Int32Regs:$b, Int32Regs:$c),
+ !strconcat("bfe.", TyStr, " \t$d, $a, $b, $c;"), []>;
+ def rri
+ : NVPTXInst<(outs RC:$d),
+ (ins RC:$a, Int32Regs:$b, i32imm:$c),
+ !strconcat("bfe.", TyStr, " \t$d, $a, $b, $c;"), []>;
+ def rii
+ : NVPTXInst<(outs RC:$d),
+ (ins RC:$a, i32imm:$b, i32imm:$c),
+ !strconcat("bfe.", TyStr, " \t$d, $a, $b, $c;"), []>;
+}
+
+let hasSideEffects = 0 in {
+ defm BFE_S32 : BFE<"s32", Int32Regs>;
+ defm BFE_U32 : BFE<"u32", Int32Regs>;
+ defm BFE_S64 : BFE<"s64", Int64Regs>;
+ defm BFE_U64 : BFE<"u64", Int64Regs>;
+}
+
+//-----------------------------------
+// Comparison instructions (setp, set)
+//-----------------------------------
+
+// FIXME: This doesn't cover versions of set and setp that combine with a
+// boolean predicate, e.g. setp.eq.and.b16.
+
+// setp: compare two values, produce an i1 predicate. The CmpMode operand's
+// :base/:ftz print modifiers emit the comparison (.eq, .lt, ...) and the
+// optional .ftz suffix.
+let hasSideEffects = 0 in {
+ multiclass SETP<string TypeStr, RegisterClass RC, Operand ImmCls> {
+ def rr :
+ NVPTXInst<(outs Int1Regs:$dst), (ins RC:$a, RC:$b, CmpMode:$cmp),
+ !strconcat("setp${cmp:base}${cmp:ftz}.", TypeStr,
+ " \t$dst, $a, $b;"), []>;
+ def ri :
+ NVPTXInst<(outs Int1Regs:$dst), (ins RC:$a, ImmCls:$b, CmpMode:$cmp),
+ !strconcat("setp${cmp:base}${cmp:ftz}.", TypeStr,
+ " \t$dst, $a, $b;"), []>;
+ def ir :
+ NVPTXInst<(outs Int1Regs:$dst), (ins ImmCls:$a, RC:$b, CmpMode:$cmp),
+ !strconcat("setp${cmp:base}${cmp:ftz}.", TypeStr,
+ " \t$dst, $a, $b;"), []>;
+ }
+}
+
+defm SETP_b16 : SETP<"b16", Int16Regs, i16imm>;
+defm SETP_s16 : SETP<"s16", Int16Regs, i16imm>;
+defm SETP_u16 : SETP<"u16", Int16Regs, i16imm>;
+defm SETP_b32 : SETP<"b32", Int32Regs, i32imm>;
+defm SETP_s32 : SETP<"s32", Int32Regs, i32imm>;
+defm SETP_u32 : SETP<"u32", Int32Regs, i32imm>;
+defm SETP_b64 : SETP<"b64", Int64Regs, i64imm>;
+defm SETP_s64 : SETP<"s64", Int64Regs, i64imm>;
+defm SETP_u64 : SETP<"u64", Int64Regs, i64imm>;
+defm SETP_f32 : SETP<"f32", Float32Regs, f32imm>;
+defm SETP_f64 : SETP<"f64", Float64Regs, f64imm>;
+// fp16 setp is register-only and gated on useFP16Math.
+def SETP_f16rr :
+ NVPTXInst<(outs Int1Regs:$dst),
+ (ins Float16Regs:$a, Float16Regs:$b, CmpMode:$cmp),
+ "setp${cmp:base}${cmp:ftz}.f16 \t$dst, $a, $b;",
+ []>, Requires<[useFP16Math]>;
+
+// f16x2 setp compares both lanes and produces two predicates, $p and $q.
+def SETP_f16x2rr :
+ NVPTXInst<(outs Int1Regs:$p, Int1Regs:$q),
+ (ins Float16x2Regs:$a, Float16x2Regs:$b, CmpMode:$cmp),
+ "setp${cmp:base}${cmp:ftz}.f16x2 \t$p|$q, $a, $b;",
+ []>,
+ Requires<[useFP16Math]>;
+
+
+// FIXME: This doesn't appear to be correct. The "set" mnemonic has the form
+// "set.CmpOp{.ftz}.dtype.stype", where dtype is the type of the destination
+// reg, either u32, s32, or f32. Anyway these aren't used at the moment.
+
+let hasSideEffects = 0 in {
+ multiclass SET<string TypeStr, RegisterClass RC, Operand ImmCls> {
+ def rr : NVPTXInst<(outs Int32Regs:$dst),
+ (ins RC:$a, RC:$b, CmpMode:$cmp),
+ !strconcat("set$cmp.", TypeStr, " \t$dst, $a, $b;"), []>;
+ def ri : NVPTXInst<(outs Int32Regs:$dst),
+ (ins RC:$a, ImmCls:$b, CmpMode:$cmp),
+ !strconcat("set$cmp.", TypeStr, " \t$dst, $a, $b;"), []>;
+ def ir : NVPTXInst<(outs Int32Regs:$dst),
+ (ins ImmCls:$a, RC:$b, CmpMode:$cmp),
+ !strconcat("set$cmp.", TypeStr, " \t$dst, $a, $b;"), []>;
+ }
+}
+
+defm SET_b16 : SET<"b16", Int16Regs, i16imm>;
+defm SET_s16 : SET<"s16", Int16Regs, i16imm>;
+defm SET_u16 : SET<"u16", Int16Regs, i16imm>;
+defm SET_b32 : SET<"b32", Int32Regs, i32imm>;
+defm SET_s32 : SET<"s32", Int32Regs, i32imm>;
+defm SET_u32 : SET<"u32", Int32Regs, i32imm>;
+defm SET_b64 : SET<"b64", Int64Regs, i64imm>;
+defm SET_s64 : SET<"s64", Int64Regs, i64imm>;
+defm SET_u64 : SET<"u64", Int64Regs, i64imm>;
+defm SET_f16 : SET<"f16", Float16Regs, f16imm>;
+defm SET_f32 : SET<"f32", Float32Regs, f32imm>;
+defm SET_f64 : SET<"f64", Float64Regs, f64imm>;
+
+//-----------------------------------
+// Selection instructions (selp)
+//-----------------------------------
+
+// FIXME: Missing slct
+
+// selp instructions that don't have any pattern matches; we explicitly use
+// them within this file.
+let hasSideEffects = 0 in {
+ multiclass SELP<string TypeStr, RegisterClass RC, Operand ImmCls> {
+ def rr : NVPTXInst<(outs RC:$dst),
+ (ins RC:$a, RC:$b, Int1Regs:$p),
+ !strconcat("selp.", TypeStr, " \t$dst, $a, $b, $p;"), []>;
+ def ri : NVPTXInst<(outs RC:$dst),
+ (ins RC:$a, ImmCls:$b, Int1Regs:$p),
+ !strconcat("selp.", TypeStr, " \t$dst, $a, $b, $p;"), []>;
+ def ir : NVPTXInst<(outs RC:$dst),
+ (ins ImmCls:$a, RC:$b, Int1Regs:$p),
+ !strconcat("selp.", TypeStr, " \t$dst, $a, $b, $p;"), []>;
+ def ii : NVPTXInst<(outs RC:$dst),
+ (ins ImmCls:$a, ImmCls:$b, Int1Regs:$p),
+ !strconcat("selp.", TypeStr, " \t$dst, $a, $b, $p;"), []>;
+ }
+
+ // Same shapes as SELP, but with selection patterns for the select node.
+ multiclass SELP_PATTERN<string TypeStr, RegisterClass RC, Operand ImmCls,
+ SDNode ImmNode> {
+ def rr :
+ NVPTXInst<(outs RC:$dst),
+ (ins RC:$a, RC:$b, Int1Regs:$p),
+ !strconcat("selp.", TypeStr, " \t$dst, $a, $b, $p;"),
+ [(set RC:$dst, (select Int1Regs:$p, RC:$a, RC:$b))]>;
+ def ri :
+ NVPTXInst<(outs RC:$dst),
+ (ins RC:$a, ImmCls:$b, Int1Regs:$p),
+ !strconcat("selp.", TypeStr, " \t$dst, $a, $b, $p;"),
+ [(set RC:$dst, (select Int1Regs:$p, RC:$a, ImmNode:$b))]>;
+ def ir :
+ NVPTXInst<(outs RC:$dst),
+ (ins ImmCls:$a, RC:$b, Int1Regs:$p),
+ !strconcat("selp.", TypeStr, " \t$dst, $a, $b, $p;"),
+ [(set RC:$dst, (select Int1Regs:$p, ImmNode:$a, RC:$b))]>;
+ def ii :
+ NVPTXInst<(outs RC:$dst),
+ (ins ImmCls:$a, ImmCls:$b, Int1Regs:$p),
+ !strconcat("selp.", TypeStr, " \t$dst, $a, $b, $p;"),
+ [(set RC:$dst, (select Int1Regs:$p, ImmNode:$a, ImmNode:$b))]>;
+ }
+}
+
+// Don't pattern match on selp.{s,u}{16,32,64} -- selp.b{16,32,64} is just as
+// good.
+defm SELP_b16 : SELP_PATTERN<"b16", Int16Regs, i16imm, imm>;
+defm SELP_s16 : SELP<"s16", Int16Regs, i16imm>;
+defm SELP_u16 : SELP<"u16", Int16Regs, i16imm>;
+defm SELP_b32 : SELP_PATTERN<"b32", Int32Regs, i32imm, imm>;
+defm SELP_s32 : SELP<"s32", Int32Regs, i32imm>;
+defm SELP_u32 : SELP<"u32", Int32Regs, i32imm>;
+defm SELP_b64 : SELP_PATTERN<"b64", Int64Regs, i64imm, imm>;
+defm SELP_s64 : SELP<"s64", Int64Regs, i64imm>;
+defm SELP_u64 : SELP<"u64", Int64Regs, i64imm>;
+// f16 select uses the untyped selp.b16 mnemonic on fp16 registers.
+defm SELP_f16 : SELP_PATTERN<"b16", Float16Regs, f16imm, fpimm>;
+defm SELP_f32 : SELP_PATTERN<"f32", Float32Regs, f32imm, fpimm>;
+defm SELP_f64 : SELP_PATTERN<"f64", Float64Regs, f64imm, fpimm>;
+
+// f16x2 select: the whole 32-bit pair is selected at once via selp.b32.
+def SELP_f16x2rr :
+ NVPTXInst<(outs Float16x2Regs:$dst),
+ (ins Float16x2Regs:$a, Float16x2Regs:$b, Int1Regs:$p),
+ "selp.b32 \t$dst, $a, $b, $p;",
+ [(set Float16x2Regs:$dst,
+ (select Int1Regs:$p, Float16x2Regs:$a, Float16x2Regs:$b))]>;
+
+//-----------------------------------
+// Data Movement (Load / Store, Move)
+//-----------------------------------
+
+def ADDRri : ComplexPattern<i32, 2, "SelectADDRri", [frameindex],
+ [SDNPWantRoot]>;
+def ADDRri64 : ComplexPattern<i64, 2, "SelectADDRri64", [frameindex],
+ [SDNPWantRoot]>;
+
+def MEMri : Operand<i32> {
+ let PrintMethod = "printMemOperand";
+ let MIOperandInfo = (ops Int32Regs, i32imm);
+}
+def MEMri64 : Operand<i64> {
+ let PrintMethod = "printMemOperand";
+ let MIOperandInfo = (ops Int64Regs, i64imm);
+}
+
+def imem : Operand<iPTR> {
+ let PrintMethod = "printOperand";
+}
+
+def imemAny : Operand<iPTRAny> {
+ let PrintMethod = "printOperand";
+}
+
+def LdStCode : Operand<i32> {
+ let PrintMethod = "printLdStCode";
+}
+
+def SDTWrapper : SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>, SDTCisPtrTy<0>]>;
+def Wrapper : SDNode<"NVPTXISD::Wrapper", SDTWrapper>;
+
+// Load a memory address into a u32 or u64 register.
+def MOV_ADDR : NVPTXInst<(outs Int32Regs:$dst), (ins imem:$a),
+ "mov.u32 \t$dst, $a;",
+ [(set Int32Regs:$dst, (Wrapper tglobaladdr:$a))]>;
+def MOV_ADDR64 : NVPTXInst<(outs Int64Regs:$dst), (ins imem:$a),
+ "mov.u64 \t$dst, $a;",
+ [(set Int64Regs:$dst, (Wrapper tglobaladdr:$a))]>;
+
+// Get pointer to local stack.
+let hasSideEffects = 0 in {
+ def MOV_DEPOT_ADDR : NVPTXInst<(outs Int32Regs:$d), (ins i32imm:$num),
+ "mov.u32 \t$d, __local_depot$num;", []>;
+ def MOV_DEPOT_ADDR_64 : NVPTXInst<(outs Int64Regs:$d), (ins i32imm:$num),
+ "mov.u64 \t$d, __local_depot$num;", []>;
+}
+
+
+// copyPhysreg is hard-coded in NVPTXInstrInfo.cpp
+// Register-to-register moves; pattern-less because register copies are
+// emitted directly by copyPhysReg.
+let IsSimpleMove=1, hasSideEffects=0 in {
+ def IMOV1rr : NVPTXInst<(outs Int1Regs:$dst), (ins Int1Regs:$sss),
+ "mov.pred \t$dst, $sss;", []>;
+ def IMOV16rr : NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$sss),
+ "mov.u16 \t$dst, $sss;", []>;
+ def IMOV32rr : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$sss),
+ "mov.u32 \t$dst, $sss;", []>;
+ def IMOV64rr : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$sss),
+ "mov.u64 \t$dst, $sss;", []>;
+
+ def FMOV16rr : NVPTXInst<(outs Float16Regs:$dst), (ins Float16Regs:$src),
+ // We have to use .b16 here as there's no mov.f16.
+ "mov.b16 \t$dst, $src;", []>;
+ def FMOV32rr : NVPTXInst<(outs Float32Regs:$dst), (ins Float32Regs:$src),
+ "mov.f32 \t$dst, $src;", []>;
+ def FMOV64rr : NVPTXInst<(outs Float64Regs:$dst), (ins Float64Regs:$src),
+ "mov.f64 \t$dst, $src;", []>;
+}
+
+// Immediate-to-register moves, with patterns materializing constants.
+def IMOV1ri : NVPTXInst<(outs Int1Regs:$dst), (ins i1imm:$src),
+ "mov.pred \t$dst, $src;",
+ [(set Int1Regs:$dst, imm:$src)]>;
+def IMOV16ri : NVPTXInst<(outs Int16Regs:$dst), (ins i16imm:$src),
+ "mov.u16 \t$dst, $src;",
+ [(set Int16Regs:$dst, imm:$src)]>;
+def IMOV32ri : NVPTXInst<(outs Int32Regs:$dst), (ins i32imm:$src),
+ "mov.u32 \t$dst, $src;",
+ [(set Int32Regs:$dst, imm:$src)]>;
+// NOTE(review): named "IMOV64i" rather than "IMOV64ri" like its siblings;
+// kept as-is since the name may be referenced elsewhere in the backend.
+def IMOV64i : NVPTXInst<(outs Int64Regs:$dst), (ins i64imm:$src),
+ "mov.u64 \t$dst, $src;",
+ [(set Int64Regs:$dst, imm:$src)]>;
+
+def FMOV32ri : NVPTXInst<(outs Float32Regs:$dst), (ins f32imm:$src),
+ "mov.f32 \t$dst, $src;",
+ [(set Float32Regs:$dst, fpimm:$src)]>;
+def FMOV64ri : NVPTXInst<(outs Float64Regs:$dst), (ins f64imm:$src),
+ "mov.f64 \t$dst, $src;",
+ [(set Float64Regs:$dst, fpimm:$src)]>;
+
+// External symbols are materialized like 32-bit immediates.
+def : Pat<(i32 (Wrapper texternalsym:$dst)), (IMOV32ri texternalsym:$dst)>;
+
+//---- Copy Frame Index ----
+// Compute reg+imm addresses (e.g. frame-index addresses) with add.
+def LEA_ADDRi : NVPTXInst<(outs Int32Regs:$dst), (ins MEMri:$addr),
+ "add.u32 \t$dst, ${addr:add};",
+ [(set Int32Regs:$dst, ADDRri:$addr)]>;
+def LEA_ADDRi64 : NVPTXInst<(outs Int64Regs:$dst), (ins MEMri64:$addr),
+ "add.u64 \t$dst, ${addr:add};",
+ [(set Int64Regs:$dst, ADDRri64:$addr)]>;
+
+//-----------------------------------
+// Comparison and Selection
+//-----------------------------------
+
+multiclass ISET_FORMAT<PatFrag OpNode, PatLeaf Mode,
+ Instruction setp_16rr,
+ Instruction setp_16ri,
+ Instruction setp_16ir,
+ Instruction setp_32rr,
+ Instruction setp_32ri,
+ Instruction setp_32ir,
+ Instruction setp_64rr,
+ Instruction setp_64ri,
+ Instruction setp_64ir,
+ Instruction set_16rr,
+ Instruction set_16ri,
+ Instruction set_16ir,
+ Instruction set_32rr,
+ Instruction set_32ri,
+ Instruction set_32ir,
+ Instruction set_64rr,
+ Instruction set_64ri,
+ Instruction set_64ir> {
+ // i16 -> pred
+ def : Pat<(i1 (OpNode Int16Regs:$a, Int16Regs:$b)),
+ (setp_16rr Int16Regs:$a, Int16Regs:$b, Mode)>;
+ def : Pat<(i1 (OpNode Int16Regs:$a, imm:$b)),
+ (setp_16ri Int16Regs:$a, imm:$b, Mode)>;
+ def : Pat<(i1 (OpNode imm:$a, Int16Regs:$b)),
+ (setp_16ir imm:$a, Int16Regs:$b, Mode)>;
+ // i32 -> pred
+ def : Pat<(i1 (OpNode Int32Regs:$a, Int32Regs:$b)),
+ (setp_32rr Int32Regs:$a, Int32Regs:$b, Mode)>;
+ def : Pat<(i1 (OpNode Int32Regs:$a, imm:$b)),
+ (setp_32ri Int32Regs:$a, imm:$b, Mode)>;
+ def : Pat<(i1 (OpNode imm:$a, Int32Regs:$b)),
+ (setp_32ir imm:$a, Int32Regs:$b, Mode)>;
+ // i64 -> pred
+ def : Pat<(i1 (OpNode Int64Regs:$a, Int64Regs:$b)),
+ (setp_64rr Int64Regs:$a, Int64Regs:$b, Mode)>;
+ def : Pat<(i1 (OpNode Int64Regs:$a, imm:$b)),
+ (setp_64ri Int64Regs:$a, imm:$b, Mode)>;
+ def : Pat<(i1 (OpNode imm:$a, Int64Regs:$b)),
+ (setp_64ir imm:$a, Int64Regs:$b, Mode)>;
+
+ // i16 -> i32
+ def : Pat<(i32 (OpNode Int16Regs:$a, Int16Regs:$b)),
+ (set_16rr Int16Regs:$a, Int16Regs:$b, Mode)>;
+ def : Pat<(i32 (OpNode Int16Regs:$a, imm:$b)),
+ (set_16ri Int16Regs:$a, imm:$b, Mode)>;
+ def : Pat<(i32 (OpNode imm:$a, Int16Regs:$b)),
+ (set_16ir imm:$a, Int16Regs:$b, Mode)>;
+ // i32 -> i32
+ def : Pat<(i32 (OpNode Int32Regs:$a, Int32Regs:$b)),
+ (set_32rr Int32Regs:$a, Int32Regs:$b, Mode)>;
+ def : Pat<(i32 (OpNode Int32Regs:$a, imm:$b)),
+ (set_32ri Int32Regs:$a, imm:$b, Mode)>;
+ def : Pat<(i32 (OpNode imm:$a, Int32Regs:$b)),
+ (set_32ir imm:$a, Int32Regs:$b, Mode)>;
+ // i64 -> i32
+ def : Pat<(i32 (OpNode Int64Regs:$a, Int64Regs:$b)),
+ (set_64rr Int64Regs:$a, Int64Regs:$b, Mode)>;
+ def : Pat<(i32 (OpNode Int64Regs:$a, imm:$b)),
+ (set_64ri Int64Regs:$a, imm:$b, Mode)>;
+ def : Pat<(i32 (OpNode imm:$a, Int64Regs:$b)),
+ (set_64ir imm:$a, Int64Regs:$b, Mode)>;
+}
+
+// Wires ISET_FORMAT up with the .s16/.s32/.s64 setp/set instructions for
+// signed setcc nodes.
+multiclass ISET_FORMAT_SIGNED<PatFrag OpNode, PatLeaf Mode>
+ : ISET_FORMAT<OpNode, Mode,
+ SETP_s16rr, SETP_s16ri, SETP_s16ir,
+ SETP_s32rr, SETP_s32ri, SETP_s32ir,
+ SETP_s64rr, SETP_s64ri, SETP_s64ir,
+ SET_s16rr, SET_s16ri, SET_s16ir,
+ SET_s32rr, SET_s32ri, SET_s32ir,
+ SET_s64rr, SET_s64ri, SET_s64ir> {
+ // TableGen doesn't like empty multiclasses.
+ def : PatLeaf<(i32 0)>;
+}
+
+// Same, with the .u16/.u32/.u64 instructions for unsigned setcc nodes.
+multiclass ISET_FORMAT_UNSIGNED<PatFrag OpNode, PatLeaf Mode>
+ : ISET_FORMAT<OpNode, Mode,
+ SETP_u16rr, SETP_u16ri, SETP_u16ir,
+ SETP_u32rr, SETP_u32ri, SETP_u32ir,
+ SETP_u64rr, SETP_u64ri, SETP_u64ir,
+ SET_u16rr, SET_u16ri, SET_u16ir,
+ SET_u32rr, SET_u32ri, SET_u32ir,
+ SET_u64rr, SET_u64ri, SET_u64ir> {
+ // TableGen doesn't like empty multiclasses.
+ def : PatLeaf<(i32 0)>;
+}
+
+// One instantiation per integer condition code.
+defm : ISET_FORMAT_SIGNED<setgt, CmpGT>;
+defm : ISET_FORMAT_SIGNED<setlt, CmpLT>;
+defm : ISET_FORMAT_SIGNED<setge, CmpGE>;
+defm : ISET_FORMAT_SIGNED<setle, CmpLE>;
+defm : ISET_FORMAT_SIGNED<seteq, CmpEQ>;
+defm : ISET_FORMAT_SIGNED<setne, CmpNE>;
+defm : ISET_FORMAT_UNSIGNED<setugt, CmpGT>;
+defm : ISET_FORMAT_UNSIGNED<setult, CmpLT>;
+defm : ISET_FORMAT_UNSIGNED<setuge, CmpGE>;
+defm : ISET_FORMAT_UNSIGNED<setule, CmpLE>;
+defm : ISET_FORMAT_UNSIGNED<setueq, CmpEQ>;
+defm : ISET_FORMAT_UNSIGNED<setune, CmpNE>;
+
+// i1 compares
+// On i1, a != b is simply a xor b, and a == b is not(a xor b). The
+// unordered forms are handled identically (no NaNs for integers).
+def : Pat<(setne Int1Regs:$a, Int1Regs:$b),
+ (XORb1rr Int1Regs:$a, Int1Regs:$b)>;
+def : Pat<(setune Int1Regs:$a, Int1Regs:$b),
+ (XORb1rr Int1Regs:$a, Int1Regs:$b)>;
+
+def : Pat<(seteq Int1Regs:$a, Int1Regs:$b),
+ (NOT1 (XORb1rr Int1Regs:$a, Int1Regs:$b))>;
+def : Pat<(setueq Int1Regs:$a, Int1Regs:$b),
+ (NOT1 (XORb1rr Int1Regs:$a, Int1Regs:$b))>;
+
+// i1 compare -> i32: materialize the i32 result of an i1 compare as -1/0.
+// p = (a xor b) is true iff a != b, so setne selects -1 when p is set.
+def : Pat<(i32 (setne Int1Regs:$a, Int1Regs:$b)),
+ (SELP_u32ii -1, 0, (XORb1rr Int1Regs:$a, Int1Regs:$b))>;
+// BUGFIX: this second pattern previously matched setne again, making it dead
+// (the first matching pattern wins), while its inverted selp operands
+// (select 0 when p, -1 when !p) only make sense for seteq: the result is -1
+// exactly when a == b. Match seteq as clearly intended.
+def : Pat<(i32 (seteq Int1Regs:$a, Int1Regs:$b)),
+ (SELP_u32ii 0, -1, (XORb1rr Int1Regs:$a, Int1Regs:$b))>;
+
+
+
+// Instantiates the ISel patterns for one FP setcc node, in both
+// flush-to-zero (ModeFTZ, gated on doF32FTZ) and plain (Mode) variants.
+// The FTZ pattern is listed first so it wins when its predicate holds.
+// f16 patterns are additionally gated on useFP16Math, and f16 immediates
+// are first materialized into a register via LOAD_CONST_F16 (there is no
+// f16 immediate instruction form here). f64 has no FTZ variant.
+multiclass FSET_FORMAT<PatFrag OpNode, PatLeaf Mode, PatLeaf ModeFTZ> {
+ // f16 -> pred
+ def : Pat<(i1 (OpNode Float16Regs:$a, Float16Regs:$b)),
+ (SETP_f16rr Float16Regs:$a, Float16Regs:$b, ModeFTZ)>,
+ Requires<[useFP16Math,doF32FTZ]>;
+ def : Pat<(i1 (OpNode Float16Regs:$a, Float16Regs:$b)),
+ (SETP_f16rr Float16Regs:$a, Float16Regs:$b, Mode)>,
+ Requires<[useFP16Math]>;
+ def : Pat<(i1 (OpNode Float16Regs:$a, fpimm:$b)),
+ (SETP_f16rr Float16Regs:$a, (LOAD_CONST_F16 fpimm:$b), ModeFTZ)>,
+ Requires<[useFP16Math,doF32FTZ]>;
+ def : Pat<(i1 (OpNode Float16Regs:$a, fpimm:$b)),
+ (SETP_f16rr Float16Regs:$a, (LOAD_CONST_F16 fpimm:$b), Mode)>,
+ Requires<[useFP16Math]>;
+ def : Pat<(i1 (OpNode fpimm:$a, Float16Regs:$b)),
+ (SETP_f16rr (LOAD_CONST_F16 fpimm:$a), Float16Regs:$b, ModeFTZ)>,
+ Requires<[useFP16Math,doF32FTZ]>;
+ def : Pat<(i1 (OpNode fpimm:$a, Float16Regs:$b)),
+ (SETP_f16rr (LOAD_CONST_F16 fpimm:$a), Float16Regs:$b, Mode)>,
+ Requires<[useFP16Math]>;
+
+ // f32 -> pred
+ def : Pat<(i1 (OpNode Float32Regs:$a, Float32Regs:$b)),
+ (SETP_f32rr Float32Regs:$a, Float32Regs:$b, ModeFTZ)>,
+ Requires<[doF32FTZ]>;
+ def : Pat<(i1 (OpNode Float32Regs:$a, Float32Regs:$b)),
+ (SETP_f32rr Float32Regs:$a, Float32Regs:$b, Mode)>;
+ def : Pat<(i1 (OpNode Float32Regs:$a, fpimm:$b)),
+ (SETP_f32ri Float32Regs:$a, fpimm:$b, ModeFTZ)>,
+ Requires<[doF32FTZ]>;
+ def : Pat<(i1 (OpNode Float32Regs:$a, fpimm:$b)),
+ (SETP_f32ri Float32Regs:$a, fpimm:$b, Mode)>;
+ def : Pat<(i1 (OpNode fpimm:$a, Float32Regs:$b)),
+ (SETP_f32ir fpimm:$a, Float32Regs:$b, ModeFTZ)>,
+ Requires<[doF32FTZ]>;
+ def : Pat<(i1 (OpNode fpimm:$a, Float32Regs:$b)),
+ (SETP_f32ir fpimm:$a, Float32Regs:$b, Mode)>;
+
+ // f64 -> pred
+ def : Pat<(i1 (OpNode Float64Regs:$a, Float64Regs:$b)),
+ (SETP_f64rr Float64Regs:$a, Float64Regs:$b, Mode)>;
+ def : Pat<(i1 (OpNode Float64Regs:$a, fpimm:$b)),
+ (SETP_f64ri Float64Regs:$a, fpimm:$b, Mode)>;
+ def : Pat<(i1 (OpNode fpimm:$a, Float64Regs:$b)),
+ (SETP_f64ir fpimm:$a, Float64Regs:$b, Mode)>;
+
+ // f16 -> i32
+ def : Pat<(i32 (OpNode Float16Regs:$a, Float16Regs:$b)),
+ (SET_f16rr Float16Regs:$a, Float16Regs:$b, ModeFTZ)>,
+ Requires<[useFP16Math, doF32FTZ]>;
+ def : Pat<(i32 (OpNode Float16Regs:$a, Float16Regs:$b)),
+ (SET_f16rr Float16Regs:$a, Float16Regs:$b, Mode)>,
+ Requires<[useFP16Math]>;
+ def : Pat<(i32 (OpNode Float16Regs:$a, fpimm:$b)),
+ (SET_f16rr Float16Regs:$a, (LOAD_CONST_F16 fpimm:$b), ModeFTZ)>,
+ Requires<[useFP16Math, doF32FTZ]>;
+ def : Pat<(i32 (OpNode Float16Regs:$a, fpimm:$b)),
+ (SET_f16rr Float16Regs:$a, (LOAD_CONST_F16 fpimm:$b), Mode)>,
+ Requires<[useFP16Math]>;
+ def : Pat<(i32 (OpNode fpimm:$a, Float16Regs:$b)),
+ (SET_f16ir (LOAD_CONST_F16 fpimm:$a), Float16Regs:$b, ModeFTZ)>,
+ Requires<[useFP16Math, doF32FTZ]>;
+ def : Pat<(i32 (OpNode fpimm:$a, Float16Regs:$b)),
+ (SET_f16ir (LOAD_CONST_F16 fpimm:$a), Float16Regs:$b, Mode)>,
+ Requires<[useFP16Math]>;
+
+ // f32 -> i32
+ def : Pat<(i32 (OpNode Float32Regs:$a, Float32Regs:$b)),
+ (SET_f32rr Float32Regs:$a, Float32Regs:$b, ModeFTZ)>,
+ Requires<[doF32FTZ]>;
+ def : Pat<(i32 (OpNode Float32Regs:$a, Float32Regs:$b)),
+ (SET_f32rr Float32Regs:$a, Float32Regs:$b, Mode)>;
+ def : Pat<(i32 (OpNode Float32Regs:$a, fpimm:$b)),
+ (SET_f32ri Float32Regs:$a, fpimm:$b, ModeFTZ)>,
+ Requires<[doF32FTZ]>;
+ def : Pat<(i32 (OpNode Float32Regs:$a, fpimm:$b)),
+ (SET_f32ri Float32Regs:$a, fpimm:$b, Mode)>;
+ def : Pat<(i32 (OpNode fpimm:$a, Float32Regs:$b)),
+ (SET_f32ir fpimm:$a, Float32Regs:$b, ModeFTZ)>,
+ Requires<[doF32FTZ]>;
+ def : Pat<(i32 (OpNode fpimm:$a, Float32Regs:$b)),
+ (SET_f32ir fpimm:$a, Float32Regs:$b, Mode)>;
+
+ // f64 -> i32
+ def : Pat<(i32 (OpNode Float64Regs:$a, Float64Regs:$b)),
+ (SET_f64rr Float64Regs:$a, Float64Regs:$b, Mode)>;
+ def : Pat<(i32 (OpNode Float64Regs:$a, fpimm:$b)),
+ (SET_f64ri Float64Regs:$a, fpimm:$b, Mode)>;
+ def : Pat<(i32 (OpNode fpimm:$a, Float64Regs:$b)),
+ (SET_f64ir fpimm:$a, Float64Regs:$b, Mode)>;
+}
+
+// Ordered FP compares.
+defm FSetOGT : FSET_FORMAT<setogt, CmpGT, CmpGT_FTZ>;
+defm FSetOLT : FSET_FORMAT<setolt, CmpLT, CmpLT_FTZ>;
+defm FSetOGE : FSET_FORMAT<setoge, CmpGE, CmpGE_FTZ>;
+defm FSetOLE : FSET_FORMAT<setole, CmpLE, CmpLE_FTZ>;
+defm FSetOEQ : FSET_FORMAT<setoeq, CmpEQ, CmpEQ_FTZ>;
+defm FSetONE : FSET_FORMAT<setone, CmpNE, CmpNE_FTZ>;
+
+// Unordered FP compares map to the *U modes.
+defm FSetUGT : FSET_FORMAT<setugt, CmpGTU, CmpGTU_FTZ>;
+defm FSetULT : FSET_FORMAT<setult, CmpLTU, CmpLTU_FTZ>;
+defm FSetUGE : FSET_FORMAT<setuge, CmpGEU, CmpGEU_FTZ>;
+defm FSetULE : FSET_FORMAT<setule, CmpLEU, CmpLEU_FTZ>;
+defm FSetUEQ : FSET_FORMAT<setueq, CmpEQU, CmpEQU_FTZ>;
+defm FSetUNE : FSET_FORMAT<setune, CmpNEU, CmpNEU_FTZ>;
+
+// "Don't care about ordering" compares use the ordered modes.
+defm FSetGT : FSET_FORMAT<setgt, CmpGT, CmpGT_FTZ>;
+defm FSetLT : FSET_FORMAT<setlt, CmpLT, CmpLT_FTZ>;
+defm FSetGE : FSET_FORMAT<setge, CmpGE, CmpGE_FTZ>;
+defm FSetLE : FSET_FORMAT<setle, CmpLE, CmpLE_FTZ>;
+defm FSetEQ : FSET_FORMAT<seteq, CmpEQ, CmpEQ_FTZ>;
+defm FSetNE : FSET_FORMAT<setne, CmpNE, CmpNE_FTZ>;
+
+// Ordered/unordered tests (both operands non-NaN / either NaN).
+defm FSetNUM : FSET_FORMAT<seto, CmpNUM, CmpNUM_FTZ>;
+defm FSetNAN : FSET_FORMAT<setuo, CmpNAN, CmpNAN_FTZ>;
+
+// FIXME: What is this doing here? Can it be deleted?
+// def ld_param : SDNode<"NVPTXISD::LOAD_PARAM", SDTLoad,
+// [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
+
+// Type profiles (#results, #operands, constraints) for the call-lowering
+// SDNodes defined below.
+def SDTDeclareParamProfile :
+ SDTypeProfile<0, 3, [SDTCisInt<0>, SDTCisInt<1>, SDTCisInt<2>]>;
+def SDTDeclareScalarParamProfile :
+ SDTypeProfile<0, 3, [SDTCisInt<0>, SDTCisInt<1>, SDTCisInt<2>]>;
+def SDTLoadParamProfile : SDTypeProfile<1, 2, [SDTCisInt<1>, SDTCisInt<2>]>;
+def SDTLoadParamV2Profile : SDTypeProfile<2, 2, [SDTCisSameAs<0, 1>, SDTCisInt<2>, SDTCisInt<3>]>;
+def SDTLoadParamV4Profile : SDTypeProfile<4, 2, [SDTCisInt<4>, SDTCisInt<5>]>;
+def SDTPrintCallProfile : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
+def SDTPrintCallUniProfile : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
+def SDTStoreParamProfile : SDTypeProfile<0, 3, [SDTCisInt<0>, SDTCisInt<1>]>;
+def SDTStoreParamV2Profile : SDTypeProfile<0, 4, [SDTCisInt<0>, SDTCisInt<1>]>;
+def SDTStoreParamV4Profile : SDTypeProfile<0, 6, [SDTCisInt<0>, SDTCisInt<1>]>;
+def SDTStoreParam32Profile : SDTypeProfile<0, 3, [SDTCisInt<0>, SDTCisInt<1>]>;
+def SDTCallArgProfile : SDTypeProfile<0, 2, [SDTCisInt<0>]>;
+def SDTCallArgMarkProfile : SDTypeProfile<0, 0, []>;
+def SDTCallVoidProfile : SDTypeProfile<0, 1, []>;
+def SDTCallValProfile : SDTypeProfile<1, 0, []>;
+def SDTMoveParamProfile : SDTypeProfile<1, 1, []>;
+def SDTStoreRetvalProfile : SDTypeProfile<0, 2, [SDTCisInt<0>]>;
+def SDTStoreRetvalV2Profile : SDTypeProfile<0, 3, [SDTCisInt<0>]>;
+def SDTStoreRetvalV4Profile : SDTypeProfile<0, 5, [SDTCisInt<0>]>;
+def SDTPseudoUseParamProfile : SDTypeProfile<0, 1, []>;
+
+// SelectionDAG nodes used for call lowering. Nearly all are chained and
+// glued (SDNPHasChain/SDNPOutGlue/SDNPInGlue) with SDNPSideEffect so the
+// printed call sequence is emitted in order and never dropped; the loads
+// additionally carry SDNPMayLoad, and MoveParam is the lone unchained node.
+def DeclareParam :
+ SDNode<"NVPTXISD::DeclareParam", SDTDeclareParamProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def DeclareScalarParam :
+ SDNode<"NVPTXISD::DeclareScalarParam", SDTDeclareScalarParamProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def DeclareRetParam :
+ SDNode<"NVPTXISD::DeclareRetParam", SDTDeclareParamProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def DeclareRet :
+ SDNode<"NVPTXISD::DeclareRet", SDTDeclareScalarParamProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def LoadParam :
+ SDNode<"NVPTXISD::LoadParam", SDTLoadParamProfile,
+ [SDNPHasChain, SDNPMayLoad, SDNPOutGlue, SDNPInGlue]>;
+def LoadParamV2 :
+ SDNode<"NVPTXISD::LoadParamV2", SDTLoadParamV2Profile,
+ [SDNPHasChain, SDNPMayLoad, SDNPOutGlue, SDNPInGlue]>;
+def LoadParamV4 :
+ SDNode<"NVPTXISD::LoadParamV4", SDTLoadParamV4Profile,
+ [SDNPHasChain, SDNPMayLoad, SDNPOutGlue, SDNPInGlue]>;
+def PrintCall :
+ SDNode<"NVPTXISD::PrintCall", SDTPrintCallProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def PrintConvergentCall :
+ SDNode<"NVPTXISD::PrintConvergentCall", SDTPrintCallProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def PrintCallUni :
+ SDNode<"NVPTXISD::PrintCallUni", SDTPrintCallUniProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def PrintConvergentCallUni :
+ SDNode<"NVPTXISD::PrintConvergentCallUni", SDTPrintCallUniProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def StoreParam :
+ SDNode<"NVPTXISD::StoreParam", SDTStoreParamProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def StoreParamV2 :
+ SDNode<"NVPTXISD::StoreParamV2", SDTStoreParamV2Profile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def StoreParamV4 :
+ SDNode<"NVPTXISD::StoreParamV4", SDTStoreParamV4Profile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def StoreParamU32 :
+ SDNode<"NVPTXISD::StoreParamU32", SDTStoreParam32Profile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def StoreParamS32 :
+ SDNode<"NVPTXISD::StoreParamS32", SDTStoreParam32Profile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def CallArgBegin :
+ SDNode<"NVPTXISD::CallArgBegin", SDTCallArgMarkProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def CallArg :
+ SDNode<"NVPTXISD::CallArg", SDTCallArgProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def LastCallArg :
+ SDNode<"NVPTXISD::LastCallArg", SDTCallArgProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def CallArgEnd :
+ SDNode<"NVPTXISD::CallArgEnd", SDTCallVoidProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def CallVoid :
+ SDNode<"NVPTXISD::CallVoid", SDTCallVoidProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def Prototype :
+ SDNode<"NVPTXISD::Prototype", SDTCallVoidProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def CallVal :
+ SDNode<"NVPTXISD::CallVal", SDTCallValProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def MoveParam :
+ SDNode<"NVPTXISD::MoveParam", SDTMoveParamProfile, []>;
+def StoreRetval :
+ SDNode<"NVPTXISD::StoreRetval", SDTStoreRetvalProfile,
+ [SDNPHasChain, SDNPSideEffect]>;
+def StoreRetvalV2 :
+ SDNode<"NVPTXISD::StoreRetvalV2", SDTStoreRetvalV2Profile,
+ [SDNPHasChain, SDNPSideEffect]>;
+def StoreRetvalV4 :
+ SDNode<"NVPTXISD::StoreRetvalV4", SDTStoreRetvalV4Profile,
+ [SDNPHasChain, SDNPSideEffect]>;
+def PseudoUseParam :
+ SDNode<"NVPTXISD::PseudoUseParam", SDTPseudoUseParamProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def RETURNNode :
+ SDNode<"NVPTXISD::RETURN", SDTCallArgMarkProfile,
+ [SDNPHasChain, SDNPSideEffect]>;
+
+// Load 1, 2 or 4 returned values from the callee's return param space at
+// [retval0+$b].
+let mayLoad = 1 in {
+ class LoadParamMemInst<NVPTXRegClass regclass, string opstr> :
+ NVPTXInst<(outs regclass:$dst), (ins i32imm:$b),
+ !strconcat("ld.param", opstr, " \t$dst, [retval0+$b];"),
+ []>;
+
+ class LoadParamV2MemInst<NVPTXRegClass regclass, string opstr> :
+ NVPTXInst<(outs regclass:$dst, regclass:$dst2), (ins i32imm:$b),
+ !strconcat("ld.param.v2", opstr,
+ " \t{{$dst, $dst2}}, [retval0+$b];"), []>;
+
+ class LoadParamV4MemInst<NVPTXRegClass regclass, string opstr> :
+ NVPTXInst<(outs regclass:$dst, regclass:$dst2, regclass:$dst3,
+ regclass:$dst4),
+ (ins i32imm:$b),
+ !strconcat("ld.param.v4", opstr,
+ " \t{{$dst, $dst2, $dst3, $dst4}}, [retval0+$b];"),
+ []>;
+}
+
+// Move a return value held in a register (retval$b) into $dst; matches the
+// LoadParam node with flag 0.
+class LoadParamRegInst<NVPTXRegClass regclass, string opstr> :
+ NVPTXInst<(outs regclass:$dst), (ins i32imm:$b),
+ !strconcat("mov", opstr, " \t$dst, retval$b;"),
+ [(set regclass:$dst, (LoadParam (i32 0), (i32 imm:$b)))]>;
+
+// StoreParam*Inst: store 1/2/4 outgoing argument values to [param$a+$b].
+// StoreRetval*Inst: store 1/2/4 of our own return values to
+// [func_retval0+$a].
+let mayStore = 1 in {
+ class StoreParamInst<NVPTXRegClass regclass, string opstr> :
+ NVPTXInst<(outs), (ins regclass:$val, i32imm:$a, i32imm:$b),
+ !strconcat("st.param", opstr, " \t[param$a+$b], $val;"),
+ []>;
+
+ class StoreParamV2Inst<NVPTXRegClass regclass, string opstr> :
+ NVPTXInst<(outs), (ins regclass:$val, regclass:$val2,
+ i32imm:$a, i32imm:$b),
+ !strconcat("st.param.v2", opstr,
+ " \t[param$a+$b], {{$val, $val2}};"),
+ []>;
+
+ class StoreParamV4Inst<NVPTXRegClass regclass, string opstr> :
+ NVPTXInst<(outs), (ins regclass:$val, regclass:$val2, regclass:$val3,
+ regclass:$val4, i32imm:$a,
+ i32imm:$b),
+ !strconcat("st.param.v4", opstr,
+ " \t[param$a+$b], {{$val, $val2, $val3, $val4}};"),
+ []>;
+
+ class StoreRetvalInst<NVPTXRegClass regclass, string opstr> :
+ NVPTXInst<(outs), (ins regclass:$val, i32imm:$a),
+ !strconcat("st.param", opstr, " \t[func_retval0+$a], $val;"),
+ []>;
+
+ class StoreRetvalV2Inst<NVPTXRegClass regclass, string opstr> :
+ NVPTXInst<(outs), (ins regclass:$val, regclass:$val2, i32imm:$a),
+ !strconcat("st.param.v2", opstr,
+ " \t[func_retval0+$a], {{$val, $val2}};"),
+ []>;
+
+ class StoreRetvalV4Inst<NVPTXRegClass regclass, string opstr> :
+ NVPTXInst<(outs),
+ (ins regclass:$val, regclass:$val2, regclass:$val3,
+ regclass:$val4, i32imm:$a),
+ !strconcat("st.param.v4", opstr,
+ " \t[func_retval0+$a], {{$val, $val2, $val3, $val4}};"),
+ []>;
+}
+
+// Prints the textual "call"/"call.uni" prefix, including the parenthesized
+// list of 0-8 return values. Which variant is used is selected by the
+// immediate carried by the PrintCall-style node (0 = no return values,
+// N = retval0..retval<N-1>).
+let isCall=1 in {
+ multiclass CALL<string OpcStr, SDNode OpNode> {
+ def PrintCallNoRetInst : NVPTXInst<(outs), (ins),
+ !strconcat(OpcStr, " "), [(OpNode (i32 0))]>;
+ def PrintCallRetInst1 : NVPTXInst<(outs), (ins),
+ !strconcat(OpcStr, " (retval0), "), [(OpNode (i32 1))]>;
+ def PrintCallRetInst2 : NVPTXInst<(outs), (ins),
+ !strconcat(OpcStr, " (retval0, retval1), "), [(OpNode (i32 2))]>;
+ def PrintCallRetInst3 : NVPTXInst<(outs), (ins),
+ !strconcat(OpcStr, " (retval0, retval1, retval2), "), [(OpNode (i32 3))]>;
+ def PrintCallRetInst4 : NVPTXInst<(outs), (ins),
+ !strconcat(OpcStr, " (retval0, retval1, retval2, retval3), "),
+ [(OpNode (i32 4))]>;
+ def PrintCallRetInst5 : NVPTXInst<(outs), (ins),
+ !strconcat(OpcStr, " (retval0, retval1, retval2, retval3, retval4), "),
+ [(OpNode (i32 5))]>;
+ def PrintCallRetInst6 : NVPTXInst<(outs), (ins),
+ !strconcat(OpcStr, " (retval0, retval1, retval2, retval3, retval4, "
+ "retval5), "),
+ [(OpNode (i32 6))]>;
+ def PrintCallRetInst7 : NVPTXInst<(outs), (ins),
+ !strconcat(OpcStr, " (retval0, retval1, retval2, retval3, retval4, "
+ "retval5, retval6), "),
+ [(OpNode (i32 7))]>;
+ def PrintCallRetInst8 : NVPTXInst<(outs), (ins),
+ !strconcat(OpcStr, " (retval0, retval1, retval2, retval3, retval4, "
+ "retval5, retval6, retval7), "),
+ [(OpNode (i32 8))]>;
+ }
+}
+
+defm Call : CALL<"call", PrintCall>;
+defm CallUni : CALL<"call.uni", PrintCallUni>;
+
+// Convergent call instructions. These are identical to regular calls, except
+// they have the isConvergent bit set.
+let isConvergent=1 in {
+ defm ConvergentCall : CALL<"call", PrintConvergentCall>;
+ defm ConvergentCallUni : CALL<"call.uni", PrintConvergentCallUni>;
+}
+
+// Concrete return-value loads. i8 values travel in Int16Regs; f16 uses the
+// .b16 suffix and f16x2 pairs load as a single .b32.
+def LoadParamMemI64 : LoadParamMemInst<Int64Regs, ".b64">;
+def LoadParamMemI32 : LoadParamMemInst<Int32Regs, ".b32">;
+def LoadParamMemI16 : LoadParamMemInst<Int16Regs, ".b16">;
+def LoadParamMemI8 : LoadParamMemInst<Int16Regs, ".b8">;
+def LoadParamMemV2I64 : LoadParamV2MemInst<Int64Regs, ".b64">;
+def LoadParamMemV2I32 : LoadParamV2MemInst<Int32Regs, ".b32">;
+def LoadParamMemV2I16 : LoadParamV2MemInst<Int16Regs, ".b16">;
+def LoadParamMemV2I8 : LoadParamV2MemInst<Int16Regs, ".b8">;
+def LoadParamMemV4I32 : LoadParamV4MemInst<Int32Regs, ".b32">;
+def LoadParamMemV4I16 : LoadParamV4MemInst<Int16Regs, ".b16">;
+def LoadParamMemV4I8 : LoadParamV4MemInst<Int16Regs, ".b8">;
+def LoadParamMemF16 : LoadParamMemInst<Float16Regs, ".b16">;
+def LoadParamMemF16x2 : LoadParamMemInst<Float16x2Regs, ".b32">;
+def LoadParamMemF32 : LoadParamMemInst<Float32Regs, ".f32">;
+def LoadParamMemF64 : LoadParamMemInst<Float64Regs, ".f64">;
+def LoadParamMemV2F16 : LoadParamV2MemInst<Float16Regs, ".b16">;
+def LoadParamMemV2F16x2: LoadParamV2MemInst<Float16x2Regs, ".b32">;
+def LoadParamMemV2F32 : LoadParamV2MemInst<Float32Regs, ".f32">;
+def LoadParamMemV2F64 : LoadParamV2MemInst<Float64Regs, ".f64">;
+def LoadParamMemV4F16 : LoadParamV4MemInst<Float16Regs, ".b16">;
+def LoadParamMemV4F16x2: LoadParamV4MemInst<Float16x2Regs, ".b32">;
+def LoadParamMemV4F32 : LoadParamV4MemInst<Float32Regs, ".f32">;
+
+// Concrete outgoing-argument stores; same type-suffix conventions as the
+// LoadParamMem* defs above (i8 in Int16Regs, f16 as .b16, f16x2 as .b32).
+def StoreParamI64 : StoreParamInst<Int64Regs, ".b64">;
+def StoreParamI32 : StoreParamInst<Int32Regs, ".b32">;
+
+def StoreParamI16 : StoreParamInst<Int16Regs, ".b16">;
+def StoreParamI8 : StoreParamInst<Int16Regs, ".b8">;
+def StoreParamV2I64 : StoreParamV2Inst<Int64Regs, ".b64">;
+def StoreParamV2I32 : StoreParamV2Inst<Int32Regs, ".b32">;
+def StoreParamV2I16 : StoreParamV2Inst<Int16Regs, ".b16">;
+def StoreParamV2I8 : StoreParamV2Inst<Int16Regs, ".b8">;
+
+def StoreParamV4I32 : StoreParamV4Inst<Int32Regs, ".b32">;
+def StoreParamV4I16 : StoreParamV4Inst<Int16Regs, ".b16">;
+def StoreParamV4I8 : StoreParamV4Inst<Int16Regs, ".b8">;
+
+def StoreParamF16 : StoreParamInst<Float16Regs, ".b16">;
+def StoreParamF16x2 : StoreParamInst<Float16x2Regs, ".b32">;
+def StoreParamF32 : StoreParamInst<Float32Regs, ".f32">;
+def StoreParamF64 : StoreParamInst<Float64Regs, ".f64">;
+def StoreParamV2F16 : StoreParamV2Inst<Float16Regs, ".b16">;
+def StoreParamV2F16x2 : StoreParamV2Inst<Float16x2Regs, ".b32">;
+def StoreParamV2F32 : StoreParamV2Inst<Float32Regs, ".f32">;
+def StoreParamV2F64 : StoreParamV2Inst<Float64Regs, ".f64">;
+def StoreParamV4F16 : StoreParamV4Inst<Float16Regs, ".b16">;
+def StoreParamV4F16x2 : StoreParamV4Inst<Float16x2Regs, ".b32">;
+def StoreParamV4F32 : StoreParamV4Inst<Float32Regs, ".f32">;
+
+// Concrete return-value stores for our own function's results.
+def StoreRetvalI64 : StoreRetvalInst<Int64Regs, ".b64">;
+def StoreRetvalI32 : StoreRetvalInst<Int32Regs, ".b32">;
+def StoreRetvalI16 : StoreRetvalInst<Int16Regs, ".b16">;
+def StoreRetvalI8 : StoreRetvalInst<Int16Regs, ".b8">;
+def StoreRetvalV2I64 : StoreRetvalV2Inst<Int64Regs, ".b64">;
+def StoreRetvalV2I32 : StoreRetvalV2Inst<Int32Regs, ".b32">;
+def StoreRetvalV2I16 : StoreRetvalV2Inst<Int16Regs, ".b16">;
+def StoreRetvalV2I8 : StoreRetvalV2Inst<Int16Regs, ".b8">;
+def StoreRetvalV4I32 : StoreRetvalV4Inst<Int32Regs, ".b32">;
+def StoreRetvalV4I16 : StoreRetvalV4Inst<Int16Regs, ".b16">;
+def StoreRetvalV4I8 : StoreRetvalV4Inst<Int16Regs, ".b8">;
+
+def StoreRetvalF64 : StoreRetvalInst<Float64Regs, ".f64">;
+def StoreRetvalF32 : StoreRetvalInst<Float32Regs, ".f32">;
+def StoreRetvalF16 : StoreRetvalInst<Float16Regs, ".b16">;
+def StoreRetvalF16x2 : StoreRetvalInst<Float16x2Regs, ".b32">;
+def StoreRetvalV2F64 : StoreRetvalV2Inst<Float64Regs, ".f64">;
+def StoreRetvalV2F32 : StoreRetvalV2Inst<Float32Regs, ".f32">;
+def StoreRetvalV2F16 : StoreRetvalV2Inst<Float16Regs, ".b16">;
+def StoreRetvalV2F16x2: StoreRetvalV2Inst<Float16x2Regs, ".b32">;
+def StoreRetvalV4F32 : StoreRetvalV4Inst<Float32Regs, ".f32">;
+def StoreRetvalV4F16 : StoreRetvalV4Inst<Float16Regs, ".b16">;
+def StoreRetvalV4F16x2: StoreRetvalV4Inst<Float16x2Regs, ".b32">;
+
+// These pseudo-instructions print the pieces of the textual PTX call
+// syntax: "(", "$a, " per argument, "$a" for the last argument, and ")"/");"
+// to close the list.
+def CallArgBeginInst : NVPTXInst<(outs), (ins), "(", [(CallArgBegin)]>;
+def CallArgEndInst1 : NVPTXInst<(outs), (ins), ");", [(CallArgEnd (i32 1))]>;
+def CallArgEndInst0 : NVPTXInst<(outs), (ins), ")", [(CallArgEnd (i32 0))]>;
+def RETURNInst : NVPTXInst<(outs), (ins), "ret;", [(RETURNNode)]>;
+
+// Non-final argument: printed with a trailing ", ".
+class CallArgInst<NVPTXRegClass regclass> :
+ NVPTXInst<(outs), (ins regclass:$a), "$a, ",
+ [(CallArg (i32 0), regclass:$a)]>;
+
+// Final argument: printed without the separator.
+class LastCallArgInst<NVPTXRegClass regclass> :
+ NVPTXInst<(outs), (ins regclass:$a), "$a",
+ [(LastCallArg (i32 0), regclass:$a)]>;
+
+def CallArgI64 : CallArgInst<Int64Regs>;
+def CallArgI32 : CallArgInst<Int32Regs>;
+def CallArgI16 : CallArgInst<Int16Regs>;
+def CallArgF64 : CallArgInst<Float64Regs>;
+def CallArgF32 : CallArgInst<Float32Regs>;
+
+def LastCallArgI64 : LastCallArgInst<Int64Regs>;
+def LastCallArgI32 : LastCallArgInst<Int32Regs>;
+def LastCallArgI16 : LastCallArgInst<Int16Regs>;
+def LastCallArgF64 : LastCallArgInst<Float64Regs>;
+def LastCallArgF32 : LastCallArgInst<Float32Regs>;
+
+def CallArgI32imm : NVPTXInst<(outs), (ins i32imm:$a), "$a, ",
+ [(CallArg (i32 0), (i32 imm:$a))]>;
+def LastCallArgI32imm : NVPTXInst<(outs), (ins i32imm:$a), "$a",
+ [(LastCallArg (i32 0), (i32 imm:$a))]>;
+
+// CallArg/LastCallArg flag 1 selects the "param$a" spelling instead of a
+// plain value.
+def CallArgParam : NVPTXInst<(outs), (ins i32imm:$a), "param$a, ",
+ [(CallArg (i32 1), (i32 imm:$a))]>;
+def LastCallArgParam : NVPTXInst<(outs), (ins i32imm:$a), "param$a",
+ [(LastCallArg (i32 1), (i32 imm:$a))]>;
+
+// Callee address: a direct symbol, or an indirect 32-/64-bit register.
+def CallVoidInst : NVPTXInst<(outs), (ins imem:$addr), "$addr, ",
+ [(CallVoid (Wrapper tglobaladdr:$addr))]>;
+def CallVoidInstReg : NVPTXInst<(outs), (ins Int32Regs:$addr), "$addr, ",
+ [(CallVoid Int32Regs:$addr)]>;
+def CallVoidInstReg64 : NVPTXInst<(outs), (ins Int64Regs:$addr), "$addr, ",
+ [(CallVoid Int64Regs:$addr)]>;
+def PrototypeInst : NVPTXInst<(outs), (ins i32imm:$val), ", prototype_$val;",
+ [(Prototype (i32 imm:$val))]>;
+
+// Emit the .param/.reg declarations for return values. The first operand of
+// the DeclareRet node selects the flavor: 1 = scalar .param, 2 = .reg.
+def DeclareRetMemInst :
+ NVPTXInst<(outs), (ins i32imm:$align, i32imm:$size, i32imm:$num),
+ ".param .align $align .b8 retval$num[$size];",
+ [(DeclareRetParam (i32 imm:$align), (i32 imm:$size), (i32 imm:$num))]>;
+def DeclareRetScalarInst :
+ NVPTXInst<(outs), (ins i32imm:$size, i32imm:$num),
+ ".param .b$size retval$num;",
+ [(DeclareRet (i32 1), (i32 imm:$size), (i32 imm:$num))]>;
+def DeclareRetRegInst :
+ NVPTXInst<(outs), (ins i32imm:$size, i32imm:$num),
+ ".reg .b$size retval$num;",
+ [(DeclareRet (i32 2), (i32 imm:$size), (i32 imm:$num))]>;
+
+// Emit the .param/.reg declarations for outgoing arguments. The trailing
+// immediate of DeclareScalarParam selects .param (0) vs .reg (1).
+def DeclareParamInst :
+ NVPTXInst<(outs), (ins i32imm:$align, i32imm:$a, i32imm:$size),
+ ".param .align $align .b8 param$a[$size];",
+ [(DeclareParam (i32 imm:$align), (i32 imm:$a), (i32 imm:$size))]>;
+def DeclareScalarParamInst :
+ NVPTXInst<(outs), (ins i32imm:$a, i32imm:$size),
+ ".param .b$size param$a;",
+ [(DeclareScalarParam (i32 imm:$a), (i32 imm:$size), (i32 0))]>;
+def DeclareScalarRegInst :
+ NVPTXInst<(outs), (ins i32imm:$a, i32imm:$size),
+ ".reg .b$size param$a;",
+ [(DeclareScalarParam (i32 imm:$a), (i32 imm:$size), (i32 1))]>;
+
+// Move an incoming register-passed parameter into a virtual register;
+// matches the (unchained) MoveParam node.
+class MoveParamInst<NVPTXRegClass regclass, string asmstr> :
+ NVPTXInst<(outs regclass:$dst), (ins regclass:$src),
+ !strconcat("mov", asmstr, " \t$dst, $src;"),
+ [(set regclass:$dst, (MoveParam regclass:$src))]>;
+
+def MoveParamI64 : MoveParamInst<Int64Regs, ".b64">;
+def MoveParamI32 : MoveParamInst<Int32Regs, ".b32">;
+// i16 params arrive widened; narrow with a converting move.
+def MoveParamI16 :
+ NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$src),
+ "cvt.u16.u32 \t$dst, $src;",
+ [(set Int16Regs:$dst, (MoveParam Int16Regs:$src))]>;
+def MoveParamF64 : MoveParamInst<Float64Regs, ".f64">;
+def MoveParamF32 : MoveParamInst<Float32Regs, ".f32">;
+// BUGFIX: was ".f16", which emitted "mov.f16" -- but as the FMOV16rr
+// definition above notes, "there's no mov.f16" in PTX (mov takes .b16 for
+// 16-bit values). Use the bit-pattern move instead; for a register-to-
+// register move the semantics are identical.
+def MoveParamF16 : MoveParamInst<Float16Regs, ".b16">;
+
+// PseudoUseParam: emits only a comment in the PTX output.  It keeps a
+// parameter value alive as a use in the DAG without generating a real
+// instruction.
+class PseudoUseParamInst<NVPTXRegClass regclass> :
+ NVPTXInst<(outs), (ins regclass:$src),
+ "// Pseudo use of $src",
+ [(PseudoUseParam regclass:$src)]>;
+
+def PseudoUseParamI64 : PseudoUseParamInst<Int64Regs>;
+def PseudoUseParamI32 : PseudoUseParamInst<Int32Regs>;
+def PseudoUseParamI16 : PseudoUseParamInst<Int16Regs>;
+def PseudoUseParamF64 : PseudoUseParamInst<Float64Regs>;
+def PseudoUseParamF32 : PseudoUseParamInst<Float32Regs>;
+
+//
+// Load / Store Handling
+//
+// Scalar loads.  The selection patterns are empty ([]), so these are matched
+// outside TableGen (presumably in the C++ isel code -- TODO confirm).  The
+// LdStCode operands print the .volatile / address-space / vector / sign
+// modifiers; $fromWidth is the bit width in the ld opcode.  Suffix key:
+//   _avar    direct symbol address        _areg / _areg_64  32/64-bit reg
+//   _ari / _ari_64  reg + imm offset      _asi              symbol + offset
+multiclass LD<NVPTXRegClass regclass> {
+ def _avar : NVPTXInst<
+ (outs regclass:$dst),
+ (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, imem:$addr),
+ "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t$dst, [$addr];", []>;
+ def _areg : NVPTXInst<
+ (outs regclass:$dst),
+ (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, Int32Regs:$addr),
+ "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t$dst, [$addr];", []>;
+ def _areg_64 : NVPTXInst<
+ (outs regclass:$dst),
+ (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, Int64Regs:$addr),
+ "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t$dst, [$addr];", []>;
+ def _ari : NVPTXInst<
+ (outs regclass:$dst),
+ (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, Int32Regs:$addr, i32imm:$offset),
+ "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t$dst, [$addr+$offset];", []>;
+ def _ari_64 : NVPTXInst<
+ (outs regclass:$dst),
+ (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec,
+ LdStCode:$Sign, i32imm:$fromWidth, Int64Regs:$addr, i32imm:$offset),
+ "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t$dst, [$addr+$offset];", []>;
+ def _asi : NVPTXInst<
+ (outs regclass:$dst),
+ (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec,
+ LdStCode:$Sign, i32imm:$fromWidth, imem:$addr, i32imm:$offset),
+ "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t$dst, [$addr+$offset];", []>;
+}
+
+// Note i8 loads use the 16-bit register class; there is no Int8Regs class.
+let mayLoad=1, hasSideEffects=0 in {
+ defm LD_i8 : LD<Int16Regs>;
+ defm LD_i16 : LD<Int16Regs>;
+ defm LD_i32 : LD<Int32Regs>;
+ defm LD_i64 : LD<Int64Regs>;
+ defm LD_f16 : LD<Float16Regs>;
+ defm LD_f16x2 : LD<Float16x2Regs>;
+ defm LD_f32 : LD<Float32Regs>;
+ defm LD_f64 : LD<Float64Regs>;
+}
+
+// Scalar stores; mirror of multiclass LD above (same addressing-mode
+// suffixes, same empty patterns), with $src stored to [$addr] and the width
+// operand named $toWidth.
+multiclass ST<NVPTXRegClass regclass> {
+ def _avar : NVPTXInst<
+ (outs),
+ (ins regclass:$src, LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec,
+ LdStCode:$Sign, i32imm:$toWidth, imem:$addr),
+ "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth"
+ " \t[$addr], $src;", []>;
+ def _areg : NVPTXInst<
+ (outs),
+ (ins regclass:$src, LdStCode:$isVol, LdStCode:$addsp,
+ LdStCode:$Vec, LdStCode:$Sign, i32imm:$toWidth, Int32Regs:$addr),
+ "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth"
+ " \t[$addr], $src;", []>;
+ def _areg_64 : NVPTXInst<
+ (outs),
+ (ins regclass:$src, LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec,
+ LdStCode:$Sign, i32imm:$toWidth, Int64Regs:$addr),
+ "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth"
+ " \t[$addr], $src;", []>;
+ def _ari : NVPTXInst<
+ (outs),
+ (ins regclass:$src, LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec,
+ LdStCode:$Sign, i32imm:$toWidth, Int32Regs:$addr, i32imm:$offset),
+ "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth"
+ " \t[$addr+$offset], $src;", []>;
+ def _ari_64 : NVPTXInst<
+ (outs),
+ (ins regclass:$src, LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec,
+ LdStCode:$Sign, i32imm:$toWidth, Int64Regs:$addr, i32imm:$offset),
+ "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth"
+ " \t[$addr+$offset], $src;", []>;
+ def _asi : NVPTXInst<
+ (outs),
+ (ins regclass:$src, LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec,
+ LdStCode:$Sign, i32imm:$toWidth, imem:$addr, i32imm:$offset),
+ "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth"
+ " \t[$addr+$offset], $src;", []>;
+}
+
+// Note i8 stores use the 16-bit register class; there is no Int8Regs class.
+let mayStore=1, hasSideEffects=0 in {
+ defm ST_i8 : ST<Int16Regs>;
+ defm ST_i16 : ST<Int16Regs>;
+ defm ST_i32 : ST<Int32Regs>;
+ defm ST_i64 : ST<Int64Regs>;
+ defm ST_f16 : ST<Float16Regs>;
+ defm ST_f16x2 : ST<Float16x2Regs>;
+ defm ST_f32 : ST<Float32Regs>;
+ defm ST_f64 : ST<Float64Regs>;
+}
+
+// The following is used only in and after vector elementizations. Vector
+// elementization happens at the machine instruction level, so the following
+// instructions never appear in the DAG.
+// Vector loads: _v2 variants produce two registers, _v4 variants four.
+// Addressing-mode suffixes are the same as in multiclass LD above.
+multiclass LD_VEC<NVPTXRegClass regclass> {
+ def _v2_avar : NVPTXInst<
+ (outs regclass:$dst1, regclass:$dst2),
+ (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, imem:$addr),
+ "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t{{$dst1, $dst2}}, [$addr];", []>;
+ def _v2_areg : NVPTXInst<
+ (outs regclass:$dst1, regclass:$dst2),
+ (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, Int32Regs:$addr),
+ "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t{{$dst1, $dst2}}, [$addr];", []>;
+ def _v2_areg_64 : NVPTXInst<
+ (outs regclass:$dst1, regclass:$dst2),
+ (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, Int64Regs:$addr),
+ "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t{{$dst1, $dst2}}, [$addr];", []>;
+ def _v2_ari : NVPTXInst<
+ (outs regclass:$dst1, regclass:$dst2),
+ (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, Int32Regs:$addr, i32imm:$offset),
+ "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t{{$dst1, $dst2}}, [$addr+$offset];", []>;
+ def _v2_ari_64 : NVPTXInst<
+ (outs regclass:$dst1, regclass:$dst2),
+ (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, Int64Regs:$addr, i32imm:$offset),
+ "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t{{$dst1, $dst2}}, [$addr+$offset];", []>;
+ def _v2_asi : NVPTXInst<
+ (outs regclass:$dst1, regclass:$dst2),
+ (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, imem:$addr, i32imm:$offset),
+ "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t{{$dst1, $dst2}}, [$addr+$offset];", []>;
+ def _v4_avar : NVPTXInst<
+ (outs regclass:$dst1, regclass:$dst2, regclass:$dst3, regclass:$dst4),
+ (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, imem:$addr),
+ "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t{{$dst1, $dst2, $dst3, $dst4}}, [$addr];", []>;
+ def _v4_areg : NVPTXInst<
+ (outs regclass:$dst1, regclass:$dst2, regclass:$dst3, regclass:$dst4),
+ (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, Int32Regs:$addr),
+ "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t{{$dst1, $dst2, $dst3, $dst4}}, [$addr];", []>;
+ def _v4_areg_64 : NVPTXInst<
+ (outs regclass:$dst1, regclass:$dst2, regclass:$dst3, regclass:$dst4),
+ (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, Int64Regs:$addr),
+ "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t{{$dst1, $dst2, $dst3, $dst4}}, [$addr];", []>;
+ def _v4_ari : NVPTXInst<
+ (outs regclass:$dst1, regclass:$dst2, regclass:$dst3, regclass:$dst4),
+ (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, Int32Regs:$addr, i32imm:$offset),
+ "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t{{$dst1, $dst2, $dst3, $dst4}}, [$addr+$offset];", []>;
+ def _v4_ari_64 : NVPTXInst<
+ (outs regclass:$dst1, regclass:$dst2, regclass:$dst3, regclass:$dst4),
+ (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, Int64Regs:$addr, i32imm:$offset),
+ "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t{{$dst1, $dst2, $dst3, $dst4}}, [$addr+$offset];", []>;
+ def _v4_asi : NVPTXInst<
+ (outs regclass:$dst1, regclass:$dst2, regclass:$dst3, regclass:$dst4),
+ (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, imem:$addr, i32imm:$offset),
+ "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t{{$dst1, $dst2, $dst3, $dst4}}, [$addr+$offset];", []>;
+}
+let mayLoad=1, hasSideEffects=0 in {
+ defm LDV_i8 : LD_VEC<Int16Regs>;
+ defm LDV_i16 : LD_VEC<Int16Regs>;
+ defm LDV_i32 : LD_VEC<Int32Regs>;
+ defm LDV_i64 : LD_VEC<Int64Regs>;
+ defm LDV_f16 : LD_VEC<Float16Regs>;
+ defm LDV_f16x2 : LD_VEC<Float16x2Regs>;
+ defm LDV_f32 : LD_VEC<Float32Regs>;
+ defm LDV_f64 : LD_VEC<Float64Regs>;
+}
+
+// Vector stores; mirror of LD_VEC above.  NOTE(review): the width operand
+// here is named $fromWidth even though these are stores (scalar ST uses
+// $toWidth) -- cosmetic inconsistency only, the operand name does not affect
+// the emitted PTX.
+multiclass ST_VEC<NVPTXRegClass regclass> {
+ def _v2_avar : NVPTXInst<
+ (outs),
+ (ins regclass:$src1, regclass:$src2, LdStCode:$isVol, LdStCode:$addsp,
+ LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth, imem:$addr),
+ "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t[$addr], {{$src1, $src2}};", []>;
+ def _v2_areg : NVPTXInst<
+ (outs),
+ (ins regclass:$src1, regclass:$src2, LdStCode:$isVol, LdStCode:$addsp,
+ LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth, Int32Regs:$addr),
+ "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t[$addr], {{$src1, $src2}};", []>;
+ def _v2_areg_64 : NVPTXInst<
+ (outs),
+ (ins regclass:$src1, regclass:$src2, LdStCode:$isVol, LdStCode:$addsp,
+ LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth, Int64Regs:$addr),
+ "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t[$addr], {{$src1, $src2}};", []>;
+ def _v2_ari : NVPTXInst<
+ (outs),
+ (ins regclass:$src1, regclass:$src2, LdStCode:$isVol, LdStCode:$addsp,
+ LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth, Int32Regs:$addr,
+ i32imm:$offset),
+ "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t[$addr+$offset], {{$src1, $src2}};", []>;
+ def _v2_ari_64 : NVPTXInst<
+ (outs),
+ (ins regclass:$src1, regclass:$src2, LdStCode:$isVol, LdStCode:$addsp,
+ LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth, Int64Regs:$addr,
+ i32imm:$offset),
+ "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t[$addr+$offset], {{$src1, $src2}};", []>;
+ def _v2_asi : NVPTXInst<
+ (outs),
+ (ins regclass:$src1, regclass:$src2, LdStCode:$isVol, LdStCode:$addsp,
+ LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth, imem:$addr,
+ i32imm:$offset),
+ "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t[$addr+$offset], {{$src1, $src2}};", []>;
+ def _v4_avar : NVPTXInst<
+ (outs),
+ (ins regclass:$src1, regclass:$src2, regclass:$src3, regclass:$src4,
+ LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, imem:$addr),
+ "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t[$addr], {{$src1, $src2, $src3, $src4}};", []>;
+ def _v4_areg : NVPTXInst<
+ (outs),
+ (ins regclass:$src1, regclass:$src2, regclass:$src3, regclass:$src4,
+ LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, Int32Regs:$addr),
+ "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t[$addr], {{$src1, $src2, $src3, $src4}};", []>;
+ def _v4_areg_64 : NVPTXInst<
+ (outs),
+ (ins regclass:$src1, regclass:$src2, regclass:$src3, regclass:$src4,
+ LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, Int64Regs:$addr),
+ "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t[$addr], {{$src1, $src2, $src3, $src4}};", []>;
+ def _v4_ari : NVPTXInst<
+ (outs),
+ (ins regclass:$src1, regclass:$src2, regclass:$src3, regclass:$src4,
+ LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, Int32Regs:$addr, i32imm:$offset),
+ "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t[$addr+$offset], {{$src1, $src2, $src3, $src4}};", []>;
+ def _v4_ari_64 : NVPTXInst<
+ (outs),
+ (ins regclass:$src1, regclass:$src2, regclass:$src3, regclass:$src4,
+ LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, Int64Regs:$addr, i32imm:$offset),
+ "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t[$addr+$offset], {{$src1, $src2, $src3, $src4}};", []>;
+ def _v4_asi : NVPTXInst<
+ (outs),
+ (ins regclass:$src1, regclass:$src2, regclass:$src3, regclass:$src4,
+ LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, imem:$addr, i32imm:$offset),
+ "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}"
+ "$fromWidth \t[$addr+$offset], {{$src1, $src2, $src3, $src4}};", []>;
+}
+
+let mayStore=1, hasSideEffects=0 in {
+ defm STV_i8 : ST_VEC<Int16Regs>;
+ defm STV_i16 : ST_VEC<Int16Regs>;
+ defm STV_i32 : ST_VEC<Int32Regs>;
+ defm STV_i64 : ST_VEC<Int64Regs>;
+ defm STV_f16 : ST_VEC<Float16Regs>;
+ defm STV_f16x2 : ST_VEC<Float16x2Regs>;
+ defm STV_f32 : ST_VEC<Float32Regs>;
+ defm STV_f64 : ST_VEC<Float64Regs>;
+}
+
+//---- Conversion ----
+
+// Bit-pattern-preserving moves between same-width int and fp register
+// classes, selected for the ISD bitconvert node and emitted as "mov.b<N>".
+class F_BITCONVERT<string SzStr, NVPTXRegClass regclassIn,
+ NVPTXRegClass regclassOut> :
+ NVPTXInst<(outs regclassOut:$d), (ins regclassIn:$a),
+ !strconcat("mov.b", !strconcat(SzStr, " \t$d, $a;")),
+ [(set regclassOut:$d, (bitconvert regclassIn:$a))]>;
+
+def BITCONVERT_16_I2F : F_BITCONVERT<"16", Int16Regs, Float16Regs>;
+def BITCONVERT_16_F2I : F_BITCONVERT<"16", Float16Regs, Int16Regs>;
+def BITCONVERT_32_I2F : F_BITCONVERT<"32", Int32Regs, Float32Regs>;
+def BITCONVERT_32_F2I : F_BITCONVERT<"32", Float32Regs, Int32Regs>;
+def BITCONVERT_64_I2F : F_BITCONVERT<"64", Int64Regs, Float64Regs>;
+def BITCONVERT_64_F2I : F_BITCONVERT<"64", Float64Regs, Int64Regs>;
+def BITCONVERT_32_I2F16x2 : F_BITCONVERT<"32", Int32Regs, Float16x2Regs>;
+def BITCONVERT_32_F16x22I : F_BITCONVERT<"32", Float16x2Regs, Int32Regs>;
+
+// NOTE: pred->fp are currently sub-optimal due to an issue in TableGen where
+// we cannot specify floating-point literals in isel patterns. Therefore, we
+// use an integer selp to select either 1 or 0 and then cvt to floating-point.
+// All int->fp conversions round to nearest even (CvtRN).
+
+// sint -> f16
+def : Pat<(f16 (sint_to_fp Int1Regs:$a)),
+ (CVT_f16_s32 (SELP_u32ii 1, 0, Int1Regs:$a), CvtRN)>;
+def : Pat<(f16 (sint_to_fp Int16Regs:$a)),
+ (CVT_f16_s16 Int16Regs:$a, CvtRN)>;
+def : Pat<(f16 (sint_to_fp Int32Regs:$a)),
+ (CVT_f16_s32 Int32Regs:$a, CvtRN)>;
+def : Pat<(f16 (sint_to_fp Int64Regs:$a)),
+ (CVT_f16_s64 Int64Regs:$a, CvtRN)>;
+
+// uint -> f16
+def : Pat<(f16 (uint_to_fp Int1Regs:$a)),
+ (CVT_f16_u32 (SELP_u32ii 1, 0, Int1Regs:$a), CvtRN)>;
+def : Pat<(f16 (uint_to_fp Int16Regs:$a)),
+ (CVT_f16_u16 Int16Regs:$a, CvtRN)>;
+def : Pat<(f16 (uint_to_fp Int32Regs:$a)),
+ (CVT_f16_u32 Int32Regs:$a, CvtRN)>;
+def : Pat<(f16 (uint_to_fp Int64Regs:$a)),
+ (CVT_f16_u64 Int64Regs:$a, CvtRN)>;
+
+// sint -> f32
+def : Pat<(f32 (sint_to_fp Int1Regs:$a)),
+ (CVT_f32_s32 (SELP_u32ii 1, 0, Int1Regs:$a), CvtRN)>;
+def : Pat<(f32 (sint_to_fp Int16Regs:$a)),
+ (CVT_f32_s16 Int16Regs:$a, CvtRN)>;
+def : Pat<(f32 (sint_to_fp Int32Regs:$a)),
+ (CVT_f32_s32 Int32Regs:$a, CvtRN)>;
+def : Pat<(f32 (sint_to_fp Int64Regs:$a)),
+ (CVT_f32_s64 Int64Regs:$a, CvtRN)>;
+
+// uint -> f32
+def : Pat<(f32 (uint_to_fp Int1Regs:$a)),
+ (CVT_f32_u32 (SELP_u32ii 1, 0, Int1Regs:$a), CvtRN)>;
+def : Pat<(f32 (uint_to_fp Int16Regs:$a)),
+ (CVT_f32_u16 Int16Regs:$a, CvtRN)>;
+def : Pat<(f32 (uint_to_fp Int32Regs:$a)),
+ (CVT_f32_u32 Int32Regs:$a, CvtRN)>;
+def : Pat<(f32 (uint_to_fp Int64Regs:$a)),
+ (CVT_f32_u64 Int64Regs:$a, CvtRN)>;
+
+// sint -> f64
+def : Pat<(f64 (sint_to_fp Int1Regs:$a)),
+ (CVT_f64_s32 (SELP_u32ii 1, 0, Int1Regs:$a), CvtRN)>;
+def : Pat<(f64 (sint_to_fp Int16Regs:$a)),
+ (CVT_f64_s16 Int16Regs:$a, CvtRN)>;
+def : Pat<(f64 (sint_to_fp Int32Regs:$a)),
+ (CVT_f64_s32 Int32Regs:$a, CvtRN)>;
+def : Pat<(f64 (sint_to_fp Int64Regs:$a)),
+ (CVT_f64_s64 Int64Regs:$a, CvtRN)>;
+
+// uint -> f64
+def : Pat<(f64 (uint_to_fp Int1Regs:$a)),
+ (CVT_f64_u32 (SELP_u32ii 1, 0, Int1Regs:$a), CvtRN)>;
+def : Pat<(f64 (uint_to_fp Int16Regs:$a)),
+ (CVT_f64_u16 Int16Regs:$a, CvtRN)>;
+def : Pat<(f64 (uint_to_fp Int32Regs:$a)),
+ (CVT_f64_u32 Int32Regs:$a, CvtRN)>;
+def : Pat<(f64 (uint_to_fp Int64Regs:$a)),
+ (CVT_f64_u64 Int64Regs:$a, CvtRN)>;
+
+
+// fp -> int conversions truncate toward zero (CvtRZI).  For each non-i1
+// width the FTZ (flush-subnormals-to-zero) pattern is listed first and
+// guarded by Requires<[doF32FTZ]>; the unguarded pattern is the fallback.
+// fp -> i1 is "value != +0.0 bit pattern": bitcast and compare against 0.
+// f64 variants have no FTZ form.
+
+// f16 -> sint
+def : Pat<(i1 (fp_to_sint Float16Regs:$a)),
+ (SETP_b16ri (BITCONVERT_16_F2I Float16Regs:$a), 0, CmpEQ)>;
+def : Pat<(i16 (fp_to_sint Float16Regs:$a)),
+ (CVT_s16_f16 Float16Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(i16 (fp_to_sint Float16Regs:$a)),
+ (CVT_s16_f16 Float16Regs:$a, CvtRZI)>;
+def : Pat<(i32 (fp_to_sint Float16Regs:$a)),
+ (CVT_s32_f16 Float16Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(i32 (fp_to_sint Float16Regs:$a)),
+ (CVT_s32_f16 Float16Regs:$a, CvtRZI)>;
+def : Pat<(i64 (fp_to_sint Float16Regs:$a)),
+ (CVT_s64_f16 Float16Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(i64 (fp_to_sint Float16Regs:$a)),
+ (CVT_s64_f16 Float16Regs:$a, CvtRZI)>;
+
+// f16 -> uint
+def : Pat<(i1 (fp_to_uint Float16Regs:$a)),
+ (SETP_b16ri (BITCONVERT_16_F2I Float16Regs:$a), 0, CmpEQ)>;
+def : Pat<(i16 (fp_to_uint Float16Regs:$a)),
+ (CVT_u16_f16 Float16Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(i16 (fp_to_uint Float16Regs:$a)),
+ (CVT_u16_f16 Float16Regs:$a, CvtRZI)>;
+def : Pat<(i32 (fp_to_uint Float16Regs:$a)),
+ (CVT_u32_f16 Float16Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(i32 (fp_to_uint Float16Regs:$a)),
+ (CVT_u32_f16 Float16Regs:$a, CvtRZI)>;
+def : Pat<(i64 (fp_to_uint Float16Regs:$a)),
+ (CVT_u64_f16 Float16Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(i64 (fp_to_uint Float16Regs:$a)),
+ (CVT_u64_f16 Float16Regs:$a, CvtRZI)>;
+
+// f32 -> sint
+def : Pat<(i1 (fp_to_sint Float32Regs:$a)),
+ (SETP_b32ri (BITCONVERT_32_F2I Float32Regs:$a), 0, CmpEQ)>;
+def : Pat<(i16 (fp_to_sint Float32Regs:$a)),
+ (CVT_s16_f32 Float32Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(i16 (fp_to_sint Float32Regs:$a)),
+ (CVT_s16_f32 Float32Regs:$a, CvtRZI)>;
+def : Pat<(i32 (fp_to_sint Float32Regs:$a)),
+ (CVT_s32_f32 Float32Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(i32 (fp_to_sint Float32Regs:$a)),
+ (CVT_s32_f32 Float32Regs:$a, CvtRZI)>;
+def : Pat<(i64 (fp_to_sint Float32Regs:$a)),
+ (CVT_s64_f32 Float32Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(i64 (fp_to_sint Float32Regs:$a)),
+ (CVT_s64_f32 Float32Regs:$a, CvtRZI)>;
+
+// f32 -> uint
+def : Pat<(i1 (fp_to_uint Float32Regs:$a)),
+ (SETP_b32ri (BITCONVERT_32_F2I Float32Regs:$a), 0, CmpEQ)>;
+def : Pat<(i16 (fp_to_uint Float32Regs:$a)),
+ (CVT_u16_f32 Float32Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(i16 (fp_to_uint Float32Regs:$a)),
+ (CVT_u16_f32 Float32Regs:$a, CvtRZI)>;
+def : Pat<(i32 (fp_to_uint Float32Regs:$a)),
+ (CVT_u32_f32 Float32Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(i32 (fp_to_uint Float32Regs:$a)),
+ (CVT_u32_f32 Float32Regs:$a, CvtRZI)>;
+def : Pat<(i64 (fp_to_uint Float32Regs:$a)),
+ (CVT_u64_f32 Float32Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(i64 (fp_to_uint Float32Regs:$a)),
+ (CVT_u64_f32 Float32Regs:$a, CvtRZI)>;
+
+// f64 -> sint
+def : Pat<(i1 (fp_to_sint Float64Regs:$a)),
+ (SETP_b64ri (BITCONVERT_64_F2I Float64Regs:$a), 0, CmpEQ)>;
+def : Pat<(i16 (fp_to_sint Float64Regs:$a)),
+ (CVT_s16_f64 Float64Regs:$a, CvtRZI)>;
+def : Pat<(i32 (fp_to_sint Float64Regs:$a)),
+ (CVT_s32_f64 Float64Regs:$a, CvtRZI)>;
+def : Pat<(i64 (fp_to_sint Float64Regs:$a)),
+ (CVT_s64_f64 Float64Regs:$a, CvtRZI)>;
+
+// f64 -> uint
+def : Pat<(i1 (fp_to_uint Float64Regs:$a)),
+ (SETP_b64ri (BITCONVERT_64_F2I Float64Regs:$a), 0, CmpEQ)>;
+def : Pat<(i16 (fp_to_uint Float64Regs:$a)),
+ (CVT_u16_f64 Float64Regs:$a, CvtRZI)>;
+def : Pat<(i32 (fp_to_uint Float64Regs:$a)),
+ (CVT_u32_f64 Float64Regs:$a, CvtRZI)>;
+def : Pat<(i64 (fp_to_uint Float64Regs:$a)),
+ (CVT_u64_f64 Float64Regs:$a, CvtRZI)>;
+
+// Integer extension.  i1 (predicate) extensions go through selp, since i1
+// lives in predicate registers; sext selects -1/0, zext selects 1/0, and
+// anyext reuses the sext form (-1/0) -- any bit pattern is acceptable.
+// Wider extensions use cvt with signed (s) or unsigned (u) source type.
+
+// sext i1
+def : Pat<(i16 (sext Int1Regs:$a)),
+ (SELP_s16ii -1, 0, Int1Regs:$a)>;
+def : Pat<(i32 (sext Int1Regs:$a)),
+ (SELP_s32ii -1, 0, Int1Regs:$a)>;
+def : Pat<(i64 (sext Int1Regs:$a)),
+ (SELP_s64ii -1, 0, Int1Regs:$a)>;
+
+// zext i1
+def : Pat<(i16 (zext Int1Regs:$a)),
+ (SELP_u16ii 1, 0, Int1Regs:$a)>;
+def : Pat<(i32 (zext Int1Regs:$a)),
+ (SELP_u32ii 1, 0, Int1Regs:$a)>;
+def : Pat<(i64 (zext Int1Regs:$a)),
+ (SELP_u64ii 1, 0, Int1Regs:$a)>;
+
+// anyext i1
+def : Pat<(i16 (anyext Int1Regs:$a)),
+ (SELP_u16ii -1, 0, Int1Regs:$a)>;
+def : Pat<(i32 (anyext Int1Regs:$a)),
+ (SELP_u32ii -1, 0, Int1Regs:$a)>;
+def : Pat<(i64 (anyext Int1Regs:$a)),
+ (SELP_u64ii -1, 0, Int1Regs:$a)>;
+
+// sext i16
+def : Pat<(i32 (sext Int16Regs:$a)),
+ (CVT_s32_s16 Int16Regs:$a, CvtNONE)>;
+def : Pat<(i64 (sext Int16Regs:$a)),
+ (CVT_s64_s16 Int16Regs:$a, CvtNONE)>;
+
+// zext i16
+def : Pat<(i32 (zext Int16Regs:$a)),
+ (CVT_u32_u16 Int16Regs:$a, CvtNONE)>;
+def : Pat<(i64 (zext Int16Regs:$a)),
+ (CVT_u64_u16 Int16Regs:$a, CvtNONE)>;
+
+// anyext i16 (implemented as zext; upper bits are unspecified anyway)
+def : Pat<(i32 (anyext Int16Regs:$a)),
+ (CVT_u32_u16 Int16Regs:$a, CvtNONE)>;
+def : Pat<(i64 (anyext Int16Regs:$a)),
+ (CVT_u64_u16 Int16Regs:$a, CvtNONE)>;
+
+// sext i32
+def : Pat<(i64 (sext Int32Regs:$a)),
+ (CVT_s64_s32 Int32Regs:$a, CvtNONE)>;
+
+// zext i32
+def : Pat<(i64 (zext Int32Regs:$a)),
+ (CVT_u64_u32 Int32Regs:$a, CvtNONE)>;
+
+// anyext i32 (implemented as zext)
+def : Pat<(i64 (anyext Int32Regs:$a)),
+ (CVT_u64_u32 Int32Regs:$a, CvtNONE)>;
+
+
+// Truncation.  Down to i16/i32 is an unsigned cvt.  Down to i1 masks the
+// low bit and compares it to 1 with setp, producing a predicate register.
+
+// truncate i64
+def : Pat<(i32 (trunc Int64Regs:$a)),
+ (CVT_u32_u64 Int64Regs:$a, CvtNONE)>;
+def : Pat<(i16 (trunc Int64Regs:$a)),
+ (CVT_u16_u64 Int64Regs:$a, CvtNONE)>;
+def : Pat<(i1 (trunc Int64Regs:$a)),
+ (SETP_b64ri (ANDb64ri Int64Regs:$a, 1), 1, CmpEQ)>;
+
+// truncate i32
+def : Pat<(i16 (trunc Int32Regs:$a)),
+ (CVT_u16_u32 Int32Regs:$a, CvtNONE)>;
+def : Pat<(i1 (trunc Int32Regs:$a)),
+ (SETP_b32ri (ANDb32ri Int32Regs:$a, 1), 1, CmpEQ)>;
+
+// truncate i16
+def : Pat<(i1 (trunc Int16Regs:$a)),
+ (SETP_b16ri (ANDb16ri Int16Regs:$a, 1), 1, CmpEQ)>;
+
+// sext_inreg: sign-extend from the given sub-width in place via cvt.
+def : Pat<(sext_inreg Int16Regs:$a, i8), (CVT_INREG_s16_s8 Int16Regs:$a)>;
+def : Pat<(sext_inreg Int32Regs:$a, i8), (CVT_INREG_s32_s8 Int32Regs:$a)>;
+def : Pat<(sext_inreg Int32Regs:$a, i16), (CVT_INREG_s32_s16 Int32Regs:$a)>;
+def : Pat<(sext_inreg Int64Regs:$a, i8), (CVT_INREG_s64_s8 Int64Regs:$a)>;
+def : Pat<(sext_inreg Int64Regs:$a, i16), (CVT_INREG_s64_s16 Int64Regs:$a)>;
+def : Pat<(sext_inreg Int64Regs:$a, i32), (CVT_INREG_s64_s32 Int64Regs:$a)>;
+
+
+// Select instructions with 32-bit predicates
+// The i32 condition is reduced to a real predicate first ((pred & 1) == 1),
+// then fed to selp.  Note only bit 0 of the predicate is significant.
+def : Pat<(select Int32Regs:$pred, Int16Regs:$a, Int16Regs:$b),
+ (SELP_b16rr Int16Regs:$a, Int16Regs:$b,
+ (SETP_b32ri (ANDb32ri Int32Regs:$pred, 1), 1, CmpEQ))>;
+def : Pat<(select Int32Regs:$pred, Int32Regs:$a, Int32Regs:$b),
+ (SELP_b32rr Int32Regs:$a, Int32Regs:$b,
+ (SETP_b32ri (ANDb32ri Int32Regs:$pred, 1), 1, CmpEQ))>;
+def : Pat<(select Int32Regs:$pred, Int64Regs:$a, Int64Regs:$b),
+ (SELP_b64rr Int64Regs:$a, Int64Regs:$b,
+ (SETP_b32ri (ANDb32ri Int32Regs:$pred, 1), 1, CmpEQ))>;
+def : Pat<(select Int32Regs:$pred, Float16Regs:$a, Float16Regs:$b),
+ (SELP_f16rr Float16Regs:$a, Float16Regs:$b,
+ (SETP_b32ri (ANDb32ri Int32Regs:$pred, 1), 1, CmpEQ))>;
+def : Pat<(select Int32Regs:$pred, Float32Regs:$a, Float32Regs:$b),
+ (SELP_f32rr Float32Regs:$a, Float32Regs:$b,
+ (SETP_b32ri (ANDb32ri Int32Regs:$pred, 1), 1, CmpEQ))>;
+def : Pat<(select Int32Regs:$pred, Float64Regs:$a, Float64Regs:$b),
+ (SELP_f64rr Float64Regs:$a, Float64Regs:$b,
+ (SETP_b32ri (ANDb32ri Int32Regs:$pred, 1), 1, CmpEQ))>;
+
+
+// Register pack/unpack via PTX vector mov.  These have empty patterns, so
+// they are created outside isel (presumably by vector lowering -- TODO
+// confirm).
+let hasSideEffects = 0 in {
+ // pack a set of smaller int registers to a larger int register
+ def V4I16toI64 : NVPTXInst<(outs Int64Regs:$d),
+ (ins Int16Regs:$s1, Int16Regs:$s2,
+ Int16Regs:$s3, Int16Regs:$s4),
+ "mov.b64 \t$d, {{$s1, $s2, $s3, $s4}};", []>;
+ def V2I16toI32 : NVPTXInst<(outs Int32Regs:$d),
+ (ins Int16Regs:$s1, Int16Regs:$s2),
+ "mov.b32 \t$d, {{$s1, $s2}};", []>;
+ def V2I32toI64 : NVPTXInst<(outs Int64Regs:$d),
+ (ins Int32Regs:$s1, Int32Regs:$s2),
+ "mov.b64 \t$d, {{$s1, $s2}};", []>;
+ def V2F32toF64 : NVPTXInst<(outs Float64Regs:$d),
+ (ins Float32Regs:$s1, Float32Regs:$s2),
+ "mov.b64 \t$d, {{$s1, $s2}};", []>;
+
+ // unpack a larger int register to a set of smaller int registers
+ def I64toV4I16 : NVPTXInst<(outs Int16Regs:$d1, Int16Regs:$d2,
+ Int16Regs:$d3, Int16Regs:$d4),
+ (ins Int64Regs:$s),
+ "mov.b64 \t{{$d1, $d2, $d3, $d4}}, $s;", []>;
+ def I32toV2I16 : NVPTXInst<(outs Int16Regs:$d1, Int16Regs:$d2),
+ (ins Int32Regs:$s),
+ "mov.b32 \t{{$d1, $d2}}, $s;", []>;
+ def I64toV2I32 : NVPTXInst<(outs Int32Regs:$d1, Int32Regs:$d2),
+ (ins Int64Regs:$s),
+ "mov.b64 \t{{$d1, $d2}}, $s;", []>;
+ def F64toV2F32 : NVPTXInst<(outs Float32Regs:$d1, Float32Regs:$d2),
+ (ins Float64Regs:$s),
+ "mov.b64 \t{{$d1, $d2}}, $s;", []>;
+
+}
+
+// f16x2 (packed half-pair) manipulation.  A v2f16 value occupies one 32-bit
+// register; element access goes through mov.b32's vector form.
+let hasSideEffects = 0 in {
+ // Extract element of f16x2 register. PTX does not provide any way
+ // to access elements of f16x2 vector directly, so we need to
+ // extract it using a temporary register.
+ def F16x2toF16_0 : NVPTXInst<(outs Float16Regs:$dst),
+ (ins Float16x2Regs:$src),
+ "{{ .reg .b16 \t%tmp_hi;\n\t"
+ " mov.b32 \t{$dst, %tmp_hi}, $src; }}",
+ [(set Float16Regs:$dst,
+ (extractelt (v2f16 Float16x2Regs:$src), 0))]>;
+ def F16x2toF16_1 : NVPTXInst<(outs Float16Regs:$dst),
+ (ins Float16x2Regs:$src),
+ "{{ .reg .b16 \t%tmp_lo;\n\t"
+ " mov.b32 \t{%tmp_lo, $dst}, $src; }}",
+ [(set Float16Regs:$dst,
+ (extractelt (v2f16 Float16x2Regs:$src), 1))]>;
+
+ // Coalesce two f16 registers into f16x2
+ def BuildF16x2 : NVPTXInst<(outs Float16x2Regs:$dst),
+ (ins Float16Regs:$a, Float16Regs:$b),
+ "mov.b32 \t$dst, {{$a, $b}};",
+ [(set Float16x2Regs:$dst,
+ (build_vector (f16 Float16Regs:$a), (f16 Float16Regs:$b)))]>;
+
+ // Directly initializing the underlying b32 register is one less SASS
+ // instruction than a vector-packing move.  (Empty pattern: used when the
+ // packed constant is known at selection time.)
+ def BuildF16x2i : NVPTXInst<(outs Float16x2Regs:$dst), (ins i32imm:$src),
+ "mov.b32 \t$dst, $src;",
+ []>;
+
+ // Split f16x2 into two f16 registers.
+ def SplitF16x2 : NVPTXInst<(outs Float16Regs:$lo, Float16Regs:$hi),
+ (ins Float16x2Regs:$src),
+ "mov.b32 \t{{$lo, $hi}}, $src;",
+ []>;
+ // Split an i32 into two f16
+ def SplitI32toF16x2 : NVPTXInst<(outs Float16Regs:$lo, Float16Regs:$hi),
+ (ins Int32Regs:$src),
+ "mov.b32 \t{{$lo, $hi}}, $src;",
+ []>;
+}
+
+// Count leading zeros
+// Note both clz.b32 and clz.b64 write a 32-bit result register.
+let hasSideEffects = 0 in {
+ def CLZr32 : NVPTXInst<(outs Int32Regs:$d), (ins Int32Regs:$a),
+ "clz.b32 \t$d, $a;", []>;
+ def CLZr64 : NVPTXInst<(outs Int32Regs:$d), (ins Int64Regs:$a),
+ "clz.b64 \t$d, $a;", []>;
+}
+
+// 32-bit has a direct PTX instruction
+def : Pat<(ctlz Int32Regs:$a), (CLZr32 Int32Regs:$a)>;
+
+// The return type of the ctlz ISD node is the same as its input, but the PTX
+// clz instruction always returns a 32-bit value. For ctlz.i64, convert the
+// ptx value to 64 bits to match the ISD node's semantics, unless we know we're
+// truncating back down to 32 bits.
+def : Pat<(ctlz Int64Regs:$a), (CVT_u64_u32 (CLZr64 Int64Regs:$a), CvtNONE)>;
+def : Pat<(i32 (trunc (ctlz Int64Regs:$a))), (CLZr64 Int64Regs:$a)>;
+
+// For 16-bit ctlz, we zero-extend to 32-bit, perform the count, then trunc the
+// result back to 16-bits if necessary. We also need to subtract 16 because
+// the high-order 16 zeros were counted.
+//
+// TODO: NVPTX has a mov.b32 b32reg, {imm, b16reg} instruction, which we could
+// use to save one SASS instruction (on sm_35 anyway):
+//
+// mov.b32 $tmp, {0xffff, $a}
+// ctlz.b32 $result, $tmp
+//
+// That is, instead of zero-extending the input to 32 bits, we'd "one-extend"
+// and then ctlz that value. This way we don't have to subtract 16 from the
+// result. Unfortunately today we don't have a way to generate
+// "mov b32reg, {b16imm, b16reg}", so we don't do this optimization.
+def : Pat<(ctlz Int16Regs:$a),
+ (SUBi16ri (CVT_u16_u32
+ (CLZr32 (CVT_u32_u16 Int16Regs:$a, CvtNONE)), CvtNONE), 16)>;
+def : Pat<(i32 (zext (ctlz Int16Regs:$a))),
+ (SUBi32ri (CLZr32 (CVT_u32_u16 Int16Regs:$a, CvtNONE)), 16)>;
+
+// Population count
+// As with clz, both popc.b32 and popc.b64 write a 32-bit result register.
+let hasSideEffects = 0 in {
+ def POPCr32 : NVPTXInst<(outs Int32Regs:$d), (ins Int32Regs:$a),
+ "popc.b32 \t$d, $a;", []>;
+ def POPCr64 : NVPTXInst<(outs Int32Regs:$d), (ins Int64Regs:$a),
+ "popc.b64 \t$d, $a;", []>;
+}
+
+// 32-bit has a direct PTX instruction
+def : Pat<(ctpop Int32Regs:$a), (POPCr32 Int32Regs:$a)>;
+
+// For 64-bit, the result in PTX is actually 32-bit so we zero-extend to 64-bit
+// to match the LLVM semantics. Just as with ctlz.i64, we provide a second
+// pattern that avoids the type conversion if we're truncating the result to
+// i32 anyway.
+def : Pat<(ctpop Int64Regs:$a), (CVT_u64_u32 (POPCr64 Int64Regs:$a), CvtNONE)>;
+def : Pat<(i32 (trunc (ctpop Int64Regs:$a))), (POPCr64 Int64Regs:$a)>;
+
+// For 16-bit, we zero-extend to 32-bit, then trunc the result back to 16-bits.
+// If we know that we're storing into an i32, we can avoid the final trunc.
+def : Pat<(ctpop Int16Regs:$a),
+ (CVT_u16_u32 (POPCr32 (CVT_u32_u16 Int16Regs:$a, CvtNONE)), CvtNONE)>;
+def : Pat<(i32 (zext (ctpop Int16Regs:$a))),
+ (POPCr32 (CVT_u32_u16 Int16Regs:$a, CvtNONE))>;
+
+// FP precision conversions.  Rounding conversions use round-to-nearest-even
+// (CvtRN); extensions need no rounding (CvtNONE).  In each pair the FTZ
+// variant (guarded by Requires<[doF32FTZ]>) takes precedence when
+// flush-to-zero mode is enabled; the unguarded pattern is the fallback.
+
+// fpround f32 -> f16
+def : Pat<(f16 (fpround Float32Regs:$a)),
+ (CVT_f16_f32 Float32Regs:$a, CvtRN_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(f16 (fpround Float32Regs:$a)),
+ (CVT_f16_f32 Float32Regs:$a, CvtRN)>;
+
+// fpround f64 -> f16
+def : Pat<(f16 (fpround Float64Regs:$a)),
+ (CVT_f16_f64 Float64Regs:$a, CvtRN_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(f16 (fpround Float64Regs:$a)),
+ (CVT_f16_f64 Float64Regs:$a, CvtRN)>;
+
+// fpround f64 -> f32
+def : Pat<(f32 (fpround Float64Regs:$a)),
+ (CVT_f32_f64 Float64Regs:$a, CvtRN_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(f32 (fpround Float64Regs:$a)),
+ (CVT_f32_f64 Float64Regs:$a, CvtRN)>;
+
+// fpextend f16 -> f32
+def : Pat<(f32 (fpextend Float16Regs:$a)),
+ (CVT_f32_f16 Float16Regs:$a, CvtNONE_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(f32 (fpextend Float16Regs:$a)),
+ (CVT_f32_f16 Float16Regs:$a, CvtNONE)>;
+
+// fpextend f16 -> f64
+def : Pat<(f64 (fpextend Float16Regs:$a)),
+ (CVT_f64_f16 Float16Regs:$a, CvtNONE_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(f64 (fpextend Float16Regs:$a)),
+ (CVT_f64_f16 Float16Regs:$a, CvtNONE)>;
+
+// fpextend f32 -> f64
+def : Pat<(f64 (fpextend Float32Regs:$a)),
+ (CVT_f64_f32 Float32Regs:$a, CvtNONE_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(f64 (fpextend Float32Regs:$a)),
+ (CVT_f64_f32 Float32Regs:$a, CvtNONE)>;
+
+// Return-from-function node (chained, optional glue input).
+def retflag : SDNode<"NVPTXISD::RET_FLAG", SDTNone,
+ [SDNPHasChain, SDNPOptInGlue]>;
+
+// fceil, ffloor, fround, ftrunc.
+// Each maps to a same-type cvt with the matching integer-rounding modifier:
+// RPI (toward +inf), RMI (toward -inf), RNI (nearest even), RZI (toward
+// zero).  FTZ and non-FTZ variants are explicitly mutually exclusive here
+// (Requires doF32FTZ / doNoF32FTZ); f64 has no FTZ form.
+
+def : Pat<(fceil Float16Regs:$a),
+ (CVT_f16_f16 Float16Regs:$a, CvtRPI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(fceil Float16Regs:$a),
+ (CVT_f16_f16 Float16Regs:$a, CvtRPI)>, Requires<[doNoF32FTZ]>;
+def : Pat<(fceil Float32Regs:$a),
+ (CVT_f32_f32 Float32Regs:$a, CvtRPI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(fceil Float32Regs:$a),
+ (CVT_f32_f32 Float32Regs:$a, CvtRPI)>, Requires<[doNoF32FTZ]>;
+def : Pat<(fceil Float64Regs:$a),
+ (CVT_f64_f64 Float64Regs:$a, CvtRPI)>;
+
+def : Pat<(ffloor Float16Regs:$a),
+ (CVT_f16_f16 Float16Regs:$a, CvtRMI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(ffloor Float16Regs:$a),
+ (CVT_f16_f16 Float16Regs:$a, CvtRMI)>, Requires<[doNoF32FTZ]>;
+def : Pat<(ffloor Float32Regs:$a),
+ (CVT_f32_f32 Float32Regs:$a, CvtRMI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(ffloor Float32Regs:$a),
+ (CVT_f32_f32 Float32Regs:$a, CvtRMI)>, Requires<[doNoF32FTZ]>;
+def : Pat<(ffloor Float64Regs:$a),
+ (CVT_f64_f64 Float64Regs:$a, CvtRMI)>;
+
+// NOTE(review): the FTZ fround patterns omit the explicit (f16 ...)/(f32 ...)
+// result-type wrapper that their doNoF32FTZ counterparts carry -- presumably
+// harmless since the operand class fixes the type, but worth confirming for
+// consistency with the other pattern groups.
+def : Pat<(fround Float16Regs:$a),
+ (CVT_f16_f16 Float16Regs:$a, CvtRNI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(f16 (fround Float16Regs:$a)),
+ (CVT_f16_f16 Float16Regs:$a, CvtRNI)>, Requires<[doNoF32FTZ]>;
+def : Pat<(fround Float32Regs:$a),
+ (CVT_f32_f32 Float32Regs:$a, CvtRNI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(f32 (fround Float32Regs:$a)),
+ (CVT_f32_f32 Float32Regs:$a, CvtRNI)>, Requires<[doNoF32FTZ]>;
+def : Pat<(f64 (fround Float64Regs:$a)),
+ (CVT_f64_f64 Float64Regs:$a, CvtRNI)>;
+
+def : Pat<(ftrunc Float16Regs:$a),
+ (CVT_f16_f16 Float16Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(ftrunc Float16Regs:$a),
+ (CVT_f16_f16 Float16Regs:$a, CvtRZI)>, Requires<[doNoF32FTZ]>;
+def : Pat<(ftrunc Float32Regs:$a),
+ (CVT_f32_f32 Float32Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(ftrunc Float32Regs:$a),
+ (CVT_f32_f32 Float32Regs:$a, CvtRZI)>, Requires<[doNoF32FTZ]>;
+def : Pat<(ftrunc Float64Regs:$a),
+ (CVT_f64_f64 Float64Regs:$a, CvtRZI)>;
+
+// nearbyint and rint are implemented as rounding to nearest even. This isn't
+// strictly correct, because it causes us to ignore the rounding mode. But it
+// matches what CUDA's "libm" does.
+
+def : Pat<(fnearbyint Float16Regs:$a),
+ (CVT_f16_f16 Float16Regs:$a, CvtRNI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(fnearbyint Float16Regs:$a),
+ (CVT_f16_f16 Float16Regs:$a, CvtRNI)>, Requires<[doNoF32FTZ]>;
+def : Pat<(fnearbyint Float32Regs:$a),
+ (CVT_f32_f32 Float32Regs:$a, CvtRNI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(fnearbyint Float32Regs:$a),
+ (CVT_f32_f32 Float32Regs:$a, CvtRNI)>, Requires<[doNoF32FTZ]>;
+def : Pat<(fnearbyint Float64Regs:$a),
+ (CVT_f64_f64 Float64Regs:$a, CvtRNI)>;
+
+def : Pat<(frint Float16Regs:$a),
+ (CVT_f16_f16 Float16Regs:$a, CvtRNI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(frint Float16Regs:$a),
+ (CVT_f16_f16 Float16Regs:$a, CvtRNI)>, Requires<[doNoF32FTZ]>;
+def : Pat<(frint Float32Regs:$a),
+ (CVT_f32_f32 Float32Regs:$a, CvtRNI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(frint Float32Regs:$a),
+ (CVT_f32_f32 Float32Regs:$a, CvtRNI)>, Requires<[doNoF32FTZ]>;
+def : Pat<(frint Float64Regs:$a),
+ (CVT_f64_f64 Float64Regs:$a, CvtRNI)>;
+
+
+//-----------------------------------
+// Control-flow
+//-----------------------------------
+
+let isTerminator=1 in {
+ let isReturn=1, isBarrier=1 in
+ def Return : NVPTXInst<(outs), (ins), "ret;", [(retflag)]>;
+
+ let isBranch=1 in
+ def CBranch : NVPTXInst<(outs), (ins Int1Regs:$a, brtarget:$target),
+ "@$a bra \t$target;",
+ [(brcond Int1Regs:$a, bb:$target)]>;
+ let isBranch=1 in
+ def CBranchOther : NVPTXInst<(outs), (ins Int1Regs:$a, brtarget:$target),
+ "@!$a bra \t$target;", []>;
+
+ let isBranch=1, isBarrier=1 in
+ def GOTO : NVPTXInst<(outs), (ins brtarget:$target),
+ "bra.uni \t$target;", [(br bb:$target)]>;
+}
+
+def : Pat<(brcond Int32Regs:$a, bb:$target),
+ (CBranch (SETP_u32ri Int32Regs:$a, 0, CmpNE), bb:$target)>;
+
+// SelectionDAGBuilder::visitSwitchCase() will invert the condition of a
+// conditional branch if the target block is the next block so that the code
+// can fall through to the target block. The inversion is done by 'xor
+// condition, 1', which will be translated to (setne condition, -1). Since ptx
+// supports '@!pred bra target', we should use it.
+def : Pat<(brcond (i1 (setne Int1Regs:$a, -1)), bb:$target),
+ (CBranchOther Int1Regs:$a, bb:$target)>;
+
+// Call
+def SDT_NVPTXCallSeqStart : SDCallSeqStart<[SDTCisVT<0, i32>]>;
+def SDT_NVPTXCallSeqEnd : SDCallSeqEnd<[SDTCisVT<0, i32>, SDTCisVT<1, i32>]>;
+
+def callseq_start : SDNode<"ISD::CALLSEQ_START", SDT_NVPTXCallSeqStart,
+ [SDNPHasChain, SDNPOutGlue, SDNPSideEffect]>;
+def callseq_end : SDNode<"ISD::CALLSEQ_END", SDT_NVPTXCallSeqEnd,
+ [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
+ SDNPSideEffect]>;
+
+def SDT_NVPTXCall : SDTypeProfile<0, 1, [SDTCisVT<0, i32>]>;
+def call : SDNode<"NVPTXISD::CALL", SDT_NVPTXCall,
+ [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
+def calltarget : Operand<i32>;
+let isCall=1 in {
+ def CALL : NVPTXInst<(outs), (ins calltarget:$dst), "call \t$dst, (1);", []>;
+}
+
+def : Pat<(call tglobaladdr:$dst), (CALL tglobaladdr:$dst)>;
+def : Pat<(call texternalsym:$dst), (CALL texternalsym:$dst)>;
+
+// Pseudo instructions.
+class Pseudo<dag outs, dag ins, string asmstr, list<dag> pattern>
+ : NVPTXInst<outs, ins, asmstr, pattern>;
+
+def Callseq_Start :
+ NVPTXInst<(outs), (ins i32imm:$amt),
+ "\\{ // callseq $amt\n"
+ "\t.reg .b32 temp_param_reg;",
+ [(callseq_start timm:$amt)]>;
+def Callseq_End :
+ NVPTXInst<(outs), (ins i32imm:$amt1, i32imm:$amt2),
+ "\\} // callseq $amt1",
+ [(callseq_end timm:$amt1, timm:$amt2)]>;
+
+// trap instruction
+def trapinst : NVPTXInst<(outs), (ins), "trap;", [(trap)]>;
+
+// Call prototype wrapper
+def SDTCallPrototype : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
+def CallPrototype :
+ SDNode<"NVPTXISD::CallPrototype", SDTCallPrototype,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def ProtoIdent : Operand<i32> {
+ let PrintMethod = "printProtoIdent";
+}
+def CALL_PROTOTYPE :
+ NVPTXInst<(outs), (ins ProtoIdent:$ident),
+ "$ident", [(CallPrototype (i32 texternalsym:$ident))]>;
+
+
+include "NVPTXIntrinsics.td"
+
+
+//-----------------------------------
+// Notes
+//-----------------------------------
+// BSWAP is currently expanded. The following would be a more efficient scheme:
+// - for < sm_20, use vector scalar mov, as Tesla supports native 16-bit
+// registers
+// - for sm_20, use pmpt (use vector scalar mov to get the pack and
+// unpack). sm_20 supports native 32-bit registers, but not native 16-bit ones.
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp b/contrib/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp
index 2a402deccbca..40bfe3a449f7 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp
@@ -1459,8 +1459,7 @@ void PPCFrameLowering::emitEpilogue(MachineFunction &MF,
}
if (FI->usesPICBase())
- BuildMI(MBB, MBBI, dl, LoadInst)
- .addReg(PPC::R30)
+ BuildMI(MBB, MBBI, dl, LoadInst, PPC::R30)
.addImm(PBPOffset)
.addReg(RBReg);
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/contrib/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 483e9b171d57..685f24cb502e 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -12031,7 +12031,7 @@ void PPCTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
const APInt &DemandedElts,
const SelectionDAG &DAG,
unsigned Depth) const {
- Known.Zero.clearAllBits(); Known.One.clearAllBits();
+ Known.resetAll();
switch (Op.getOpcode()) {
default: break;
case PPCISD::LBRX: {
diff --git a/contrib/llvm/lib/Target/Sparc/SparcISelLowering.cpp b/contrib/llvm/lib/Target/Sparc/SparcISelLowering.cpp
index c44e371856a5..acb34d5baaa8 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcISelLowering.cpp
+++ b/contrib/llvm/lib/Target/Sparc/SparcISelLowering.cpp
@@ -1881,7 +1881,7 @@ void SparcTargetLowering::computeKnownBitsForTargetNode
const SelectionDAG &DAG,
unsigned Depth) const {
KnownBits Known2;
- Known.Zero.clearAllBits(); Known.One.clearAllBits();
+ Known.resetAll();
switch (Op.getOpcode()) {
default: break;
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
index fee008b9572a..a30bf34857b5 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
@@ -850,12 +850,18 @@ void SystemZInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
const DebugLoc &DL, unsigned DestReg,
unsigned SrcReg, bool KillSrc) const {
- // Split 128-bit GPR moves into two 64-bit moves. This handles ADDR128 too.
+ // Split 128-bit GPR moves into two 64-bit moves. Add implicit uses of the
+ // super register in case one of the subregs is undefined.
+ // This handles ADDR128 too.
if (SystemZ::GR128BitRegClass.contains(DestReg, SrcReg)) {
copyPhysReg(MBB, MBBI, DL, RI.getSubReg(DestReg, SystemZ::subreg_h64),
RI.getSubReg(SrcReg, SystemZ::subreg_h64), KillSrc);
+ MachineInstrBuilder(*MBB.getParent(), std::prev(MBBI))
+ .addReg(SrcReg, RegState::Implicit);
copyPhysReg(MBB, MBBI, DL, RI.getSubReg(DestReg, SystemZ::subreg_l64),
RI.getSubReg(SrcReg, SystemZ::subreg_l64), KillSrc);
+ MachineInstrBuilder(*MBB.getParent(), std::prev(MBBI))
+ .addReg(SrcReg, (getKillRegState(KillSrc) | RegState::Implicit));
return;
}
diff --git a/contrib/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp b/contrib/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
index c1cfc82b4a81..32ab475f1186 100644
--- a/contrib/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
+++ b/contrib/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
@@ -776,11 +776,6 @@ private:
bool ParseZ(std::unique_ptr<X86Operand> &Z, const SMLoc &StartLoc);
- /// MS-compatibility:
- /// Obtain an appropriate size qualifier, when facing its absence,
- /// upon AVX512 vector/broadcast memory operand
- unsigned AdjustAVX512Mem(unsigned Size, X86Operand* UnsizedMemOpNext);
-
bool is64BitMode() const {
// FIXME: Can tablegen auto-generate this?
return getSTI().getFeatureBits()[X86::Mode64Bit];
@@ -1206,27 +1201,16 @@ std::unique_ptr<X86Operand> X86AsmParser::CreateMemForInlineAsm(
Identifier, Info.OpDecl);
}
+
// We either have a direct symbol reference, or an offset from a symbol. The
// parser always puts the symbol on the LHS, so look there for size
// calculation purposes.
+ unsigned FrontendSize = 0;
const MCBinaryExpr *BinOp = dyn_cast<MCBinaryExpr>(Disp);
bool IsSymRef =
isa<MCSymbolRefExpr>(BinOp ? BinOp->getLHS() : Disp);
- if (IsSymRef) {
- if (!Size) {
- Size = Info.Type * 8; // Size is in terms of bits in this context.
- if (Size)
- InstInfo->AsmRewrites->emplace_back(AOK_SizeDirective, Start,
- /*Len=*/0, Size);
- if (AllowBetterSizeMatch)
- // Handle cases where size qualifier is absent, upon an indirect symbol
- // reference - e.g. "vaddps zmm1, zmm2, [var]"
- // set Size to zero to allow matching mechansim to try and find a better
- // size qualifier than our initial guess, based on available variants of
- // the given instruction
- Size = 0;
- }
- }
+ if (IsSymRef && !Size && Info.Type)
+ FrontendSize = Info.Type * 8; // Size is in terms of bits in this context.
// When parsing inline assembly we set the base register to a non-zero value
// if we don't know the actual value at this time. This is necessary to
@@ -1234,7 +1218,7 @@ std::unique_ptr<X86Operand> X86AsmParser::CreateMemForInlineAsm(
BaseReg = BaseReg ? BaseReg : 1;
return X86Operand::CreateMem(getPointerWidth(), SegReg, Disp, BaseReg,
IndexReg, Scale, Start, End, Size, Identifier,
- Info.OpDecl);
+ Info.OpDecl, FrontendSize);
}
static void
@@ -2884,23 +2868,6 @@ bool X86AsmParser::MatchAndEmitATTInstruction(SMLoc IDLoc, unsigned &Opcode,
return true;
}
-unsigned X86AsmParser::AdjustAVX512Mem(unsigned Size,
- X86Operand* UnsizedMemOpNext) {
- // Check for the existence of an AVX512 platform
- if (!getSTI().getFeatureBits()[X86::FeatureAVX512])
- return 0;
- // Allow adjusting upon a (x|y|z)mm
- if (Size == 512 || Size == 256 || Size == 128)
- return Size;
- // This is an allegadly broadcasting mem op adjustment,
- // allow some more inquiring to validate it
- if (Size == 64 || Size == 32)
- return UnsizedMemOpNext && UnsizedMemOpNext->isToken() &&
- UnsizedMemOpNext->getToken().substr(0, 4).equals("{1to") ? Size : 0;
- // Do not allow any other type of adjustments
- return 0;
-}
-
bool X86AsmParser::MatchAndEmitIntelInstruction(SMLoc IDLoc, unsigned &Opcode,
OperandVector &Operands,
MCStreamer &Out,
@@ -2920,19 +2887,14 @@ bool X86AsmParser::MatchAndEmitIntelInstruction(SMLoc IDLoc, unsigned &Opcode,
// Find one unsized memory operand, if present.
X86Operand *UnsizedMemOp = nullptr;
- // If unsized memory operand was found - obtain following operand.
- // For use in AdjustAVX512Mem
- X86Operand *UnsizedMemOpNext = nullptr;
for (const auto &Op : Operands) {
X86Operand *X86Op = static_cast<X86Operand *>(Op.get());
- if (UnsizedMemOp) {
- UnsizedMemOpNext = X86Op;
+ if (X86Op->isMemUnsized()) {
+ UnsizedMemOp = X86Op;
// Have we found an unqualified memory operand,
// break. IA allows only one memory operand.
break;
}
- if (X86Op->isMemUnsized())
- UnsizedMemOp = X86Op;
}
// Allow some instructions to have implicitly pointer-sized operands. This is
@@ -2978,7 +2940,6 @@ bool X86AsmParser::MatchAndEmitIntelInstruction(SMLoc IDLoc, unsigned &Opcode,
// If an unsized memory operand is present, try to match with each memory
// operand size. In Intel assembly, the size is not part of the instruction
// mnemonic.
- unsigned MatchedSize = 0;
if (UnsizedMemOp && UnsizedMemOp->isMemUnsized()) {
static const unsigned MopSizes[] = {8, 16, 32, 64, 80, 128, 256, 512};
for (unsigned Size : MopSizes) {
@@ -2993,17 +2954,10 @@ bool X86AsmParser::MatchAndEmitIntelInstruction(SMLoc IDLoc, unsigned &Opcode,
// If this returned as a missing feature failure, remember that.
if (Match.back() == Match_MissingFeature)
ErrorInfoMissingFeature = ErrorInfoIgnore;
- if (M == Match_Success)
- // MS-compatability:
- // Adjust AVX512 vector/broadcast memory operand,
- // when facing the absence of a size qualifier.
- // Match GCC behavior on respective cases.
- MatchedSize = AdjustAVX512Mem(Size, UnsizedMemOpNext);
}
// Restore the size of the unsized memory operand if we modified it.
- if (UnsizedMemOp)
- UnsizedMemOp->Mem.Size = 0;
+ UnsizedMemOp->Mem.Size = 0;
}
// If we haven't matched anything yet, this is not a basic integer or FPU
@@ -3027,20 +2981,30 @@ bool X86AsmParser::MatchAndEmitIntelInstruction(SMLoc IDLoc, unsigned &Opcode,
Op.getLocRange(), MatchingInlineAsm);
}
+ unsigned NumSuccessfulMatches =
+ std::count(std::begin(Match), std::end(Match), Match_Success);
+
+ // If matching was ambiguous and we had size information from the frontend,
+ // try again with that. This handles cases like "movzx eax, m8/m16".
+ if (UnsizedMemOp && NumSuccessfulMatches > 1 &&
+ UnsizedMemOp->getMemFrontendSize()) {
+ UnsizedMemOp->Mem.Size = UnsizedMemOp->getMemFrontendSize();
+ unsigned M = MatchInstruction(
+ Operands, Inst, ErrorInfo, MatchingInlineAsm, isParsingIntelSyntax());
+ if (M == Match_Success)
+ NumSuccessfulMatches = 1;
+
+ // Add a rewrite that encodes the size information we used from the
+ // frontend.
+ InstInfo->AsmRewrites->emplace_back(
+ AOK_SizeDirective, UnsizedMemOp->getStartLoc(),
+ /*Len=*/0, UnsizedMemOp->getMemFrontendSize());
+ }
+
// If exactly one matched, then we treat that as a successful match (and the
// instruction will already have been filled in correctly, since the failing
// matches won't have modified it).
- unsigned NumSuccessfulMatches =
- std::count(std::begin(Match), std::end(Match), Match_Success);
if (NumSuccessfulMatches == 1) {
- if (MatchedSize && isParsingInlineAsm() && isParsingIntelSyntax())
- // MS compatibility -
- // Fix the rewrite according to the matched memory size
- // MS inline assembly only
- for (AsmRewrite &AR : *InstInfo->AsmRewrites)
- if ((AR.Loc.getPointer() == UnsizedMemOp->StartLoc.getPointer()) &&
- (AR.Kind == AOK_SizeDirective))
- AR.Val = MatchedSize;
// Some instructions need post-processing to, for example, tweak which
// encoding is selected. Loop on it while changes happen so the individual
// transformations can chain off each other.
@@ -3057,7 +3021,7 @@ bool X86AsmParser::MatchAndEmitIntelInstruction(SMLoc IDLoc, unsigned &Opcode,
"multiple matches only possible with unsized memory operands");
return Error(UnsizedMemOp->getStartLoc(),
"ambiguous operand size for instruction '" + Mnemonic + "\'",
- UnsizedMemOp->getLocRange(), MatchingInlineAsm);
+ UnsizedMemOp->getLocRange());
}
// If one instruction matched with a missing feature, report this as a
diff --git a/contrib/llvm/lib/Target/X86/AsmParser/X86Operand.h b/contrib/llvm/lib/Target/X86/AsmParser/X86Operand.h
index 9f1fa6c65907..33eff14b8215 100644
--- a/contrib/llvm/lib/Target/X86/AsmParser/X86Operand.h
+++ b/contrib/llvm/lib/Target/X86/AsmParser/X86Operand.h
@@ -62,6 +62,10 @@ struct X86Operand : public MCParsedAsmOperand {
unsigned Scale;
unsigned Size;
unsigned ModeSize;
+
+ /// If the memory operand is unsized and there are multiple instruction
+ /// matches, prefer the one with this size.
+ unsigned FrontendSize;
};
union {
@@ -136,6 +140,10 @@ struct X86Operand : public MCParsedAsmOperand {
assert(Kind == Memory && "Invalid access!");
return Mem.ModeSize;
}
+ unsigned getMemFrontendSize() const {
+ assert(Kind == Memory && "Invalid access!");
+ return Mem.FrontendSize;
+ }
bool isToken() const override {return Kind == Token; }
@@ -512,7 +520,7 @@ struct X86Operand : public MCParsedAsmOperand {
static std::unique_ptr<X86Operand>
CreateMem(unsigned ModeSize, const MCExpr *Disp, SMLoc StartLoc, SMLoc EndLoc,
unsigned Size = 0, StringRef SymName = StringRef(),
- void *OpDecl = nullptr) {
+ void *OpDecl = nullptr, unsigned FrontendSize = 0) {
auto Res = llvm::make_unique<X86Operand>(Memory, StartLoc, EndLoc);
Res->Mem.SegReg = 0;
Res->Mem.Disp = Disp;
@@ -521,6 +529,7 @@ struct X86Operand : public MCParsedAsmOperand {
Res->Mem.Scale = 1;
Res->Mem.Size = Size;
Res->Mem.ModeSize = ModeSize;
+ Res->Mem.FrontendSize = FrontendSize;
Res->SymName = SymName;
Res->OpDecl = OpDecl;
Res->AddressOf = false;
@@ -532,7 +541,7 @@ struct X86Operand : public MCParsedAsmOperand {
CreateMem(unsigned ModeSize, unsigned SegReg, const MCExpr *Disp,
unsigned BaseReg, unsigned IndexReg, unsigned Scale, SMLoc StartLoc,
SMLoc EndLoc, unsigned Size = 0, StringRef SymName = StringRef(),
- void *OpDecl = nullptr) {
+ void *OpDecl = nullptr, unsigned FrontendSize = 0) {
// We should never just have a displacement, that should be parsed as an
// absolute memory operand.
assert((SegReg || BaseReg || IndexReg) && "Invalid memory operand!");
@@ -548,6 +557,7 @@ struct X86Operand : public MCParsedAsmOperand {
Res->Mem.Scale = Scale;
Res->Mem.Size = Size;
Res->Mem.ModeSize = ModeSize;
+ Res->Mem.FrontendSize = FrontendSize;
Res->SymName = SymName;
Res->OpDecl = OpDecl;
Res->AddressOf = false;
diff --git a/contrib/llvm/lib/Target/X86/X86AsmPrinter.h b/contrib/llvm/lib/Target/X86/X86AsmPrinter.h
index 44bc373b0394..d7c3b74d3efb 100644
--- a/contrib/llvm/lib/Target/X86/X86AsmPrinter.h
+++ b/contrib/llvm/lib/Target/X86/X86AsmPrinter.h
@@ -91,6 +91,7 @@ class LLVM_LIBRARY_VISIBILITY X86AsmPrinter : public AsmPrinter {
X86MCInstLower &MCIL);
void LowerPATCHABLE_RET(const MachineInstr &MI, X86MCInstLower &MCIL);
void LowerPATCHABLE_TAIL_CALL(const MachineInstr &MI, X86MCInstLower &MCIL);
+ void LowerPATCHABLE_EVENT_CALL(const MachineInstr &MI, X86MCInstLower &MCIL);
void LowerFENTRY_CALL(const MachineInstr &MI, X86MCInstLower &MCIL);
diff --git a/contrib/llvm/lib/Target/X86/X86FrameLowering.cpp b/contrib/llvm/lib/Target/X86/X86FrameLowering.cpp
index a94045cd536d..331e56976db7 100644
--- a/contrib/llvm/lib/Target/X86/X86FrameLowering.cpp
+++ b/contrib/llvm/lib/Target/X86/X86FrameLowering.cpp
@@ -2990,6 +2990,10 @@ unsigned X86FrameLowering::getWinEHParentFrameOffset(const MachineFunction &MF)
void X86FrameLowering::processFunctionBeforeFrameFinalized(
MachineFunction &MF, RegScavenger *RS) const {
+ // Mark the function as not having WinCFI. We will set it back to true in
+ // emitPrologue if it gets called and emits CFI.
+ MF.setHasWinCFI(false);
+
// If this function isn't doing Win64-style C++ EH, we don't need to do
// anything.
const Function *Fn = MF.getFunction();
diff --git a/contrib/llvm/lib/Target/X86/X86ISelLowering.cpp b/contrib/llvm/lib/Target/X86/X86ISelLowering.cpp
index 83542aaa013b..9ee2234595f9 100644
--- a/contrib/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/contrib/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -1224,10 +1224,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::TRUNCATE, MVT::i1, Custom);
setOperationAction(ISD::TRUNCATE, MVT::v16i8, Custom);
setOperationAction(ISD::TRUNCATE, MVT::v8i32, Custom);
- setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i1, Custom);
- setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i1, Custom);
- setOperationAction(ISD::VSELECT, MVT::v8i1, Expand);
- setOperationAction(ISD::VSELECT, MVT::v16i1, Expand);
+
if (Subtarget.hasDQI()) {
for (auto VT : { MVT::v2i64, MVT::v4i64, MVT::v8i64 }) {
setOperationAction(ISD::SINT_TO_FP, VT, Legal);
@@ -1243,8 +1240,6 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
}
}
if (Subtarget.hasVLX()) {
- setOperationAction(ISD::ABS, MVT::v4i64, Legal);
- setOperationAction(ISD::ABS, MVT::v2i64, Legal);
setOperationAction(ISD::SINT_TO_FP, MVT::v8i32, Legal);
setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, Legal);
setOperationAction(ISD::FP_TO_SINT, MVT::v8i32, Legal);
@@ -1270,8 +1265,6 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setLoadExtAction(ISD::EXTLOAD, MVT::v2i64, MVT::v2i32, Legal);
}
- setOperationAction(ISD::TRUNCATE, MVT::v8i1, Custom);
- setOperationAction(ISD::TRUNCATE, MVT::v16i1, Custom);
setOperationAction(ISD::TRUNCATE, MVT::v16i16, Custom);
setOperationAction(ISD::ZERO_EXTEND, MVT::v16i32, Custom);
setOperationAction(ISD::ZERO_EXTEND, MVT::v8i64, Custom);
@@ -1304,33 +1297,34 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i32, Custom);
setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i1, Custom);
- setOperationAction(ISD::SETCC, MVT::v16i1, Custom);
- setOperationAction(ISD::SETCC, MVT::v8i1, Custom);
-
setOperationAction(ISD::MUL, MVT::v8i64, Custom);
- setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i1, Custom);
- setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i1, Custom);
setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v16i1, Custom);
- setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i1, Custom);
- setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i1, Custom);
- setOperationAction(ISD::BUILD_VECTOR, MVT::v8i1, Custom);
- setOperationAction(ISD::BUILD_VECTOR, MVT::v16i1, Custom);
setOperationAction(ISD::SELECT, MVT::v8f64, Custom);
setOperationAction(ISD::SELECT, MVT::v8i64, Custom);
setOperationAction(ISD::SELECT, MVT::v16f32, Custom);
- setOperationAction(ISD::SELECT, MVT::v16i1, Custom);
- setOperationAction(ISD::SELECT, MVT::v8i1, Custom);
-
- setOperationAction(ISD::ADD, MVT::v8i1, Custom);
- setOperationAction(ISD::ADD, MVT::v16i1, Custom);
- setOperationAction(ISD::SUB, MVT::v8i1, Custom);
- setOperationAction(ISD::SUB, MVT::v16i1, Custom);
- setOperationAction(ISD::MUL, MVT::v8i1, Custom);
- setOperationAction(ISD::MUL, MVT::v16i1, Custom);
setOperationAction(ISD::MUL, MVT::v16i32, Legal);
+ // NonVLX sub-targets extend 128/256 vectors to use the 512 version.
+ setOperationAction(ISD::ABS, MVT::v4i64, Legal);
+ setOperationAction(ISD::ABS, MVT::v2i64, Legal);
+
+ for (auto VT : { MVT::v8i1, MVT::v16i1 }) {
+ setOperationAction(ISD::ADD, VT, Custom);
+ setOperationAction(ISD::SUB, VT, Custom);
+ setOperationAction(ISD::MUL, VT, Custom);
+ setOperationAction(ISD::SETCC, VT, Custom);
+ setOperationAction(ISD::SELECT, VT, Custom);
+ setOperationAction(ISD::TRUNCATE, VT, Custom);
+
+ setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
+ setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
+ setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
+ setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
+ setOperationAction(ISD::VSELECT, VT, Expand);
+ }
+
for (auto VT : { MVT::v16i32, MVT::v8i64 }) {
setOperationAction(ISD::SMAX, VT, Legal);
setOperationAction(ISD::UMAX, VT, Legal);
@@ -1352,33 +1346,12 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationPromotedToType(ISD::XOR, MVT::v16i32, MVT::v8i64);
if (Subtarget.hasCDI()) {
- setOperationAction(ISD::CTLZ, MVT::v8i64, Legal);
- setOperationAction(ISD::CTLZ, MVT::v16i32, Legal);
-
- setOperationAction(ISD::CTLZ, MVT::v8i16, Custom);
- setOperationAction(ISD::CTLZ, MVT::v16i8, Custom);
- setOperationAction(ISD::CTLZ, MVT::v16i16, Custom);
- setOperationAction(ISD::CTLZ, MVT::v32i8, Custom);
-
- setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v8i64, Custom);
- setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v16i32, Custom);
-
- if (Subtarget.hasVLX()) {
- setOperationAction(ISD::CTLZ, MVT::v4i64, Legal);
- setOperationAction(ISD::CTLZ, MVT::v8i32, Legal);
- setOperationAction(ISD::CTLZ, MVT::v2i64, Legal);
- setOperationAction(ISD::CTLZ, MVT::v4i32, Legal);
- } else {
- setOperationAction(ISD::CTLZ, MVT::v4i64, Custom);
- setOperationAction(ISD::CTLZ, MVT::v8i32, Custom);
- setOperationAction(ISD::CTLZ, MVT::v2i64, Custom);
- setOperationAction(ISD::CTLZ, MVT::v4i32, Custom);
+ // NonVLX sub-targets extend 128/256 vectors to use the 512 version.
+ for (auto VT : {MVT::v4i32, MVT::v8i32, MVT::v16i32, MVT::v2i64,
+ MVT::v4i64, MVT::v8i64}) {
+ setOperationAction(ISD::CTLZ, VT, Legal);
+ setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Custom);
}
-
- setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v4i64, Custom);
- setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v8i32, Custom);
- setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v2i64, Custom);
- setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v4i32, Custom);
} // Subtarget.hasCDI()
if (Subtarget.hasDQI()) {
@@ -6070,7 +6043,7 @@ static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros,
unsigned NumNonZero, unsigned NumZero,
SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
- if (NumNonZero > 8)
+ if (NumNonZero > 8 && !Subtarget.hasSSE41())
return SDValue();
SDLoc dl(Op);
@@ -6158,7 +6131,7 @@ static SDValue LowerBuildVectorv8i16(SDValue Op, unsigned NonZeros,
unsigned NumNonZero, unsigned NumZero,
SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
- if (NumNonZero > 4)
+ if (NumNonZero > 4 && !Subtarget.hasSSE41())
return SDValue();
SDLoc dl(Op);
@@ -6241,7 +6214,7 @@ static SDValue LowerBuildVectorv4x32(SDValue Op, SelectionDAG &DAG,
Elt = Op->getOperand(EltIdx);
// By construction, Elt is a EXTRACT_VECTOR_ELT with constant index.
- EltMaskIdx = cast<ConstantSDNode>(Elt.getOperand(1))->getZExtValue();
+ EltMaskIdx = Elt.getConstantOperandVal(1);
if (Elt.getOperand(0) != V1 || EltMaskIdx != EltIdx)
break;
Mask[EltIdx] = EltIdx;
@@ -6272,8 +6245,7 @@ static SDValue LowerBuildVectorv4x32(SDValue Op, SelectionDAG &DAG,
SDValue SrcVector = Current->getOperand(0);
if (!V1.getNode())
V1 = SrcVector;
- CanFold = SrcVector == V1 &&
- cast<ConstantSDNode>(Current.getOperand(1))->getZExtValue() == i;
+ CanFold = (SrcVector == V1) && (Current.getConstantOperandVal(1) == i);
}
if (!CanFold)
@@ -20944,54 +20916,62 @@ SDValue X86TargetLowering::LowerFLT_ROUNDS_(SDValue Op,
ISD::TRUNCATE : ISD::ZERO_EXTEND), DL, VT, RetVal);
}
+// Split an unary integer op into 2 half sized ops.
+static SDValue LowerVectorIntUnary(SDValue Op, SelectionDAG &DAG) {
+ MVT VT = Op.getSimpleValueType();
+ unsigned NumElems = VT.getVectorNumElements();
+ unsigned SizeInBits = VT.getSizeInBits();
+
+ // Extract the Lo/Hi vectors
+ SDLoc dl(Op);
+ SDValue Src = Op.getOperand(0);
+ SDValue Lo = extractSubVector(Src, 0, DAG, dl, SizeInBits / 2);
+ SDValue Hi = extractSubVector(Src, NumElems / 2, DAG, dl, SizeInBits / 2);
+
+ MVT EltVT = VT.getVectorElementType();
+ MVT NewVT = MVT::getVectorVT(EltVT, NumElems / 2);
+ return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
+ DAG.getNode(Op.getOpcode(), dl, NewVT, Lo),
+ DAG.getNode(Op.getOpcode(), dl, NewVT, Hi));
+}
+
+// Decompose 256-bit ops into smaller 128-bit ops.
+static SDValue Lower256IntUnary(SDValue Op, SelectionDAG &DAG) {
+ assert(Op.getSimpleValueType().is256BitVector() &&
+ Op.getSimpleValueType().isInteger() &&
+ "Only handle AVX 256-bit vector integer operation");
+ return LowerVectorIntUnary(Op, DAG);
+}
+
+// Decompose 512-bit ops into smaller 256-bit ops.
+static SDValue Lower512IntUnary(SDValue Op, SelectionDAG &DAG) {
+ assert(Op.getSimpleValueType().is512BitVector() &&
+ Op.getSimpleValueType().isInteger() &&
+ "Only handle AVX 512-bit vector integer operation");
+ return LowerVectorIntUnary(Op, DAG);
+}
+
/// \brief Lower a vector CTLZ using native supported vector CTLZ instruction.
//
-// 1. i32/i64 128/256-bit vector (native support require VLX) are expended
-// to 512-bit vector.
-// 2. i8/i16 vector implemented using dword LZCNT vector instruction
-// ( sub(trunc(lzcnt(zext32(x)))) ). In case zext32(x) is illegal,
-// split the vector, perform operation on it's Lo a Hi part and
-// concatenate the results.
-static SDValue LowerVectorCTLZ_AVX512(SDValue Op, SelectionDAG &DAG) {
+// i8/i16 vector implemented using dword LZCNT vector instruction
+// ( sub(trunc(lzcnt(zext32(x)))) ). In case zext32(x) is illegal,
+// split the vector, perform operation on it's Lo a Hi part and
+// concatenate the results.
+static SDValue LowerVectorCTLZ_AVX512CDI(SDValue Op, SelectionDAG &DAG) {
assert(Op.getOpcode() == ISD::CTLZ);
SDLoc dl(Op);
MVT VT = Op.getSimpleValueType();
MVT EltVT = VT.getVectorElementType();
unsigned NumElems = VT.getVectorNumElements();
- if (EltVT == MVT::i64 || EltVT == MVT::i32) {
- // Extend to 512 bit vector.
- assert((VT.is256BitVector() || VT.is128BitVector()) &&
- "Unsupported value type for operation");
-
- MVT NewVT = MVT::getVectorVT(EltVT, 512 / VT.getScalarSizeInBits());
- SDValue Vec512 = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, NewVT,
- DAG.getUNDEF(NewVT),
- Op.getOperand(0),
- DAG.getIntPtrConstant(0, dl));
- SDValue CtlzNode = DAG.getNode(ISD::CTLZ, dl, NewVT, Vec512);
-
- return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, CtlzNode,
- DAG.getIntPtrConstant(0, dl));
- }
-
assert((EltVT == MVT::i8 || EltVT == MVT::i16) &&
"Unsupported element type");
- if (16 < NumElems) {
- // Split vector, it's Lo and Hi parts will be handled in next iteration.
- SDValue Lo, Hi;
- std::tie(Lo, Hi) = DAG.SplitVector(Op.getOperand(0), dl);
- MVT OutVT = MVT::getVectorVT(EltVT, NumElems/2);
-
- Lo = DAG.getNode(ISD::CTLZ, dl, OutVT, Lo);
- Hi = DAG.getNode(ISD::CTLZ, dl, OutVT, Hi);
-
- return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
- }
+ // Split vector, it's Lo and Hi parts will be handled in next iteration.
+ if (16 < NumElems)
+ return LowerVectorIntUnary(Op, DAG);
MVT NewVT = MVT::getVectorVT(MVT::i32, NumElems);
-
assert((NewVT.is256BitVector() || NewVT.is512BitVector()) &&
"Unsupported value type for operation");
@@ -21078,23 +21058,17 @@ static SDValue LowerVectorCTLZ(SDValue Op, const SDLoc &DL,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MVT VT = Op.getSimpleValueType();
- SDValue Op0 = Op.getOperand(0);
- if (Subtarget.hasAVX512())
- return LowerVectorCTLZ_AVX512(Op, DAG);
+ if (Subtarget.hasCDI())
+ return LowerVectorCTLZ_AVX512CDI(Op, DAG);
// Decompose 256-bit ops into smaller 128-bit ops.
- if (VT.is256BitVector() && !Subtarget.hasInt256()) {
- unsigned NumElems = VT.getVectorNumElements();
+ if (VT.is256BitVector() && !Subtarget.hasInt256())
+ return Lower256IntUnary(Op, DAG);
- // Extract each 128-bit vector, perform ctlz and concat the result.
- SDValue LHS = extract128BitVector(Op0, 0, DAG, DL);
- SDValue RHS = extract128BitVector(Op0, NumElems / 2, DAG, DL);
-
- return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT,
- DAG.getNode(ISD::CTLZ, DL, LHS.getValueType(), LHS),
- DAG.getNode(ISD::CTLZ, DL, RHS.getValueType(), RHS));
- }
+ // Decompose 512-bit ops into smaller 256-bit ops.
+ if (VT.is512BitVector() && !Subtarget.hasBWI())
+ return Lower512IntUnary(Op, DAG);
assert(Subtarget.hasSSSE3() && "Expected SSSE3 support for PSHUFB");
return LowerVectorCTLZInRegLUT(Op, DL, Subtarget, DAG);
@@ -21258,19 +21232,7 @@ static SDValue LowerABS(SDValue Op, SelectionDAG &DAG) {
assert(Op.getSimpleValueType().is256BitVector() &&
Op.getSimpleValueType().isInteger() &&
"Only handle AVX 256-bit vector integer operation");
- MVT VT = Op.getSimpleValueType();
- unsigned NumElems = VT.getVectorNumElements();
-
- SDLoc dl(Op);
- SDValue Src = Op.getOperand(0);
- SDValue Lo = extract128BitVector(Src, 0, DAG, dl);
- SDValue Hi = extract128BitVector(Src, NumElems / 2, DAG, dl);
-
- MVT EltVT = VT.getVectorElementType();
- MVT NewVT = MVT::getVectorVT(EltVT, NumElems / 2);
- return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
- DAG.getNode(ISD::ABS, dl, NewVT, Lo),
- DAG.getNode(ISD::ABS, dl, NewVT, Hi));
+ return Lower256IntUnary(Op, DAG);
}
static SDValue LowerMINMAX(SDValue Op, SelectionDAG &DAG) {
@@ -23049,29 +23011,13 @@ static SDValue LowerVectorCTPOP(SDValue Op, const X86Subtarget &Subtarget,
return LowerVectorCTPOPBitmath(Op0, DL, Subtarget, DAG);
}
- if (VT.is256BitVector() && !Subtarget.hasInt256()) {
- unsigned NumElems = VT.getVectorNumElements();
-
- // Extract each 128-bit vector, compute pop count and concat the result.
- SDValue LHS = extract128BitVector(Op0, 0, DAG, DL);
- SDValue RHS = extract128BitVector(Op0, NumElems / 2, DAG, DL);
-
- return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT,
- LowerVectorCTPOPInRegLUT(LHS, DL, Subtarget, DAG),
- LowerVectorCTPOPInRegLUT(RHS, DL, Subtarget, DAG));
- }
-
- if (VT.is512BitVector() && !Subtarget.hasBWI()) {
- unsigned NumElems = VT.getVectorNumElements();
-
- // Extract each 256-bit vector, compute pop count and concat the result.
- SDValue LHS = extract256BitVector(Op0, 0, DAG, DL);
- SDValue RHS = extract256BitVector(Op0, NumElems / 2, DAG, DL);
+ // Decompose 256-bit ops into smaller 128-bit ops.
+ if (VT.is256BitVector() && !Subtarget.hasInt256())
+ return Lower256IntUnary(Op, DAG);
- return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT,
- LowerVectorCTPOPInRegLUT(LHS, DL, Subtarget, DAG),
- LowerVectorCTPOPInRegLUT(RHS, DL, Subtarget, DAG));
- }
+ // Decompose 512-bit ops into smaller 256-bit ops.
+ if (VT.is512BitVector() && !Subtarget.hasBWI())
+ return Lower512IntUnary(Op, DAG);
return LowerVectorCTPOPInRegLUT(Op0, DL, Subtarget, DAG);
}
@@ -23098,20 +23044,12 @@ static SDValue LowerBITREVERSE_XOP(SDValue Op, SelectionDAG &DAG) {
DAG.getIntPtrConstant(0, DL));
}
- MVT SVT = VT.getVectorElementType();
int NumElts = VT.getVectorNumElements();
int ScalarSizeInBytes = VT.getScalarSizeInBits() / 8;
// Decompose 256-bit ops into smaller 128-bit ops.
- if (VT.is256BitVector()) {
- SDValue Lo = extract128BitVector(In, 0, DAG, DL);
- SDValue Hi = extract128BitVector(In, NumElts / 2, DAG, DL);
-
- MVT HalfVT = MVT::getVectorVT(SVT, NumElts / 2);
- return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT,
- DAG.getNode(ISD::BITREVERSE, DL, HalfVT, Lo),
- DAG.getNode(ISD::BITREVERSE, DL, HalfVT, Hi));
- }
+ if (VT.is256BitVector())
+ return Lower256IntUnary(Op, DAG);
assert(VT.is128BitVector() &&
"Only 128-bit vector bitreverse lowering supported.");
@@ -23152,14 +23090,8 @@ static SDValue LowerBITREVERSE(SDValue Op, const X86Subtarget &Subtarget,
"Only byte vector BITREVERSE supported");
// Decompose 256-bit ops into smaller 128-bit ops on pre-AVX2.
- if (VT.is256BitVector() && !Subtarget.hasInt256()) {
- MVT HalfVT = MVT::getVectorVT(MVT::i8, NumElts / 2);
- SDValue Lo = extract128BitVector(In, 0, DAG, DL);
- SDValue Hi = extract128BitVector(In, NumElts / 2, DAG, DL);
- Lo = DAG.getNode(ISD::BITREVERSE, DL, HalfVT, Lo);
- Hi = DAG.getNode(ISD::BITREVERSE, DL, HalfVT, Hi);
- return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
- }
+ if (VT.is256BitVector() && !Subtarget.hasInt256())
+ return Lower256IntUnary(Op, DAG);
// Perform BITREVERSE using PSHUFB lookups. Each byte is split into
// two nibbles and a PSHUFB lookup to find the bitreverse of each
@@ -26585,6 +26517,10 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
case TargetOpcode::STACKMAP:
case TargetOpcode::PATCHPOINT:
return emitPatchPoint(MI, BB);
+
+ case TargetOpcode::PATCHABLE_EVENT_CALL:
+ // Do nothing here, handle in xray instrumentation pass.
+ return BB;
case X86::LCMPXCHG8B: {
const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
@@ -26667,7 +26603,7 @@ void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
"Should use MaskedValueIsZero if you don't know whether Op"
" is a target node!");
- Known.Zero.clearAllBits(); Known.One.clearAllBits();
+ Known.resetAll();
switch (Opc) {
default: break;
case X86ISD::ADD:
@@ -26697,7 +26633,7 @@ void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
case X86ISD::VSRLI: {
if (auto *ShiftImm = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
if (ShiftImm->getAPIntValue().uge(VT.getScalarSizeInBits())) {
- Known.Zero.setAllBits();
+ Known.setAllZero();
break;
}
@@ -26729,8 +26665,7 @@ void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
Known = KnownBits(InBitWidth);
APInt DemandedSrcElts = APInt::getLowBitsSet(InNumElts, NumElts);
DAG.computeKnownBits(N0, Known, DemandedSrcElts, Depth + 1);
- Known.One = Known.One.zext(BitWidth);
- Known.Zero = Known.Zero.zext(BitWidth);
+ Known = Known.zext(BitWidth);
Known.Zero.setBitsFrom(InBitWidth);
break;
}
@@ -31671,10 +31606,9 @@ static SDValue combineLogicBlendIntoPBLENDV(SDNode *N, SelectionDAG &DAG,
if (auto *AmtBV = dyn_cast<BuildVectorSDNode>(Mask.getOperand(1)))
if (auto *AmtConst = AmtBV->getConstantSplatNode())
SraAmt = AmtConst->getZExtValue();
- } else if (Mask.getOpcode() == X86ISD::VSRAI) {
- SDValue SraC = Mask.getOperand(1);
- SraAmt = cast<ConstantSDNode>(SraC)->getZExtValue();
- }
+ } else if (Mask.getOpcode() == X86ISD::VSRAI)
+ SraAmt = Mask.getConstantOperandVal(1);
+
if ((SraAmt + 1) != EltBits)
return SDValue();
@@ -31708,7 +31642,9 @@ static SDValue combineLogicBlendIntoPBLENDV(SDNode *N, SelectionDAG &DAG,
V = Y;
if (V) {
- assert(EltBits == 8 || EltBits == 16 || EltBits == 32);
+ if (EltBits != 8 && EltBits != 16 && EltBits != 32)
+ return SDValue();
+
SDValue SubOp1 = DAG.getNode(ISD::XOR, DL, MaskVT, V, Mask);
SDValue SubOp2 = Mask;
@@ -34488,8 +34424,7 @@ static SDValue combineX86ADD(SDNode *N, SelectionDAG &DAG,
if (Carry.getOpcode() == ISD::SETCC ||
Carry.getOpcode() == X86ISD::SETCC ||
Carry.getOpcode() == X86ISD::SETCC_CARRY) {
- auto *Cond = cast<ConstantSDNode>(Carry.getOperand(0));
- if (Cond->getZExtValue() == X86::COND_B)
+ if (Carry.getConstantOperandVal(0) == X86::COND_B)
return DCI.CombineTo(N, SDValue(N, 0), Carry.getOperand(1));
}
}
diff --git a/contrib/llvm/lib/Target/X86/X86InstrAVX512.td b/contrib/llvm/lib/Target/X86/X86InstrAVX512.td
index c38c13bb9757..71d395244b4a 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrAVX512.td
+++ b/contrib/llvm/lib/Target/X86/X86InstrAVX512.td
@@ -8631,6 +8631,20 @@ multiclass avx512_unary_rm_vl_all<bits<8> opc_b, bits<8> opc_w,
defm VPABS : avx512_unary_rm_vl_all<0x1C, 0x1D, 0x1E, 0x1F, "vpabs", abs>;
+// VPABS: Use 512bit version to implement 128/256 bit in case NoVLX.
+let Predicates = [HasAVX512, NoVLX] in {
+ def : Pat<(v4i64 (abs VR256X:$src)),
+ (EXTRACT_SUBREG
+ (VPABSQZrr
+ (INSERT_SUBREG (v8i64 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)),
+ sub_ymm)>;
+ def : Pat<(v2i64 (abs VR128X:$src)),
+ (EXTRACT_SUBREG
+ (VPABSQZrr
+ (INSERT_SUBREG (v8i64 (IMPLICIT_DEF)), VR128X:$src, sub_xmm)),
+ sub_xmm)>;
+}
+
multiclass avx512_ctlz<bits<8> opc, string OpcodeStr, Predicate prd>{
defm NAME : avx512_unary_rm_vl_dq<opc, opc, OpcodeStr, ctlz, prd>;
@@ -8639,6 +8653,31 @@ multiclass avx512_ctlz<bits<8> opc, string OpcodeStr, Predicate prd>{
defm VPLZCNT : avx512_ctlz<0x44, "vplzcnt", HasCDI>;
defm VPCONFLICT : avx512_unary_rm_vl_dq<0xC4, 0xC4, "vpconflict", X86Conflict, HasCDI>;
+// VPLZCNT: Use 512bit version to implement 128/256 bit in case NoVLX.
+let Predicates = [HasCDI, NoVLX] in {
+ def : Pat<(v4i64 (ctlz VR256X:$src)),
+ (EXTRACT_SUBREG
+ (VPLZCNTQZrr
+ (INSERT_SUBREG (v8i64 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)),
+ sub_ymm)>;
+ def : Pat<(v2i64 (ctlz VR128X:$src)),
+ (EXTRACT_SUBREG
+ (VPLZCNTQZrr
+ (INSERT_SUBREG (v8i64 (IMPLICIT_DEF)), VR128X:$src, sub_xmm)),
+ sub_xmm)>;
+
+ def : Pat<(v8i32 (ctlz VR256X:$src)),
+ (EXTRACT_SUBREG
+ (VPLZCNTDZrr
+ (INSERT_SUBREG (v16i32 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)),
+ sub_ymm)>;
+ def : Pat<(v4i32 (ctlz VR128X:$src)),
+ (EXTRACT_SUBREG
+ (VPLZCNTDZrr
+ (INSERT_SUBREG (v16i32 (IMPLICIT_DEF)), VR128X:$src, sub_xmm)),
+ sub_xmm)>;
+}
+
//===---------------------------------------------------------------------===//
// Replicate Single FP - MOVSHDUP and MOVSLDUP
//===---------------------------------------------------------------------===//
diff --git a/contrib/llvm/lib/Target/X86/X86InstrInfo.td b/contrib/llvm/lib/Target/X86/X86InstrInfo.td
index cdf7ce19cdc8..902b0c2c04e3 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrInfo.td
+++ b/contrib/llvm/lib/Target/X86/X86InstrInfo.td
@@ -1995,11 +1995,11 @@ def REX64_PREFIX : I<0x48, RawFrm, (outs), (ins), "rex64", []>,
Requires<[In64BitMode]>;
// Data16 instruction prefix
-def DATA16_PREFIX : I<0x66, RawFrm, (outs), (ins), "data16", []>,
+def DATA16_PREFIX : I<0x66, RawFrm, (outs), (ins), "data16", []>,
Requires<[Not16BitMode]>;
// Data instruction prefix
-def DATA32_PREFIX : I<0x66, RawFrm, (outs), (ins), "data32", []>,
+def DATA32_PREFIX : I<0x66, RawFrm, (outs), (ins), "data32", []>,
Requires<[In16BitMode]>;
// Repeat string operation instruction prefixes
@@ -2518,7 +2518,7 @@ let SchedRW = [ WriteSystem ] in {
}
let Uses = [ ECX, EAX, EBX ] in {
- def MWAITXrrr : I<0x01, MRM_FB, (outs), (ins), "mwaitx",
+ def MWAITXrrr : I<0x01, MRM_FB, (outs), (ins), "mwaitx",
[(int_x86_mwaitx ECX, EAX, EBX)], IIC_SSE_MWAITX>,
TB, Requires<[ HasMWAITX ]>;
}
diff --git a/contrib/llvm/lib/Target/X86/X86InstrSSE.td b/contrib/llvm/lib/Target/X86/X86InstrSSE.td
index f22a50200c9a..48da2fa607af 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrSSE.td
+++ b/contrib/llvm/lib/Target/X86/X86InstrSSE.td
@@ -6718,22 +6718,23 @@ let Constraints = "$src1 = $dst" in {
SSE_INTMUL_ITINS_P, 1>;
}
-let Predicates = [HasAVX, NoVLX] in {
+let Predicates = [HasAVX, NoVLX] in
defm VPMULLD : SS48I_binop_rm<0x40, "vpmulld", mul, v4i32, VR128,
loadv2i64, i128mem, 0, SSE_PMULLD_ITINS>,
VEX_4V, VEX_WIG;
+let Predicates = [HasAVX] in
defm VPCMPEQQ : SS48I_binop_rm<0x29, "vpcmpeqq", X86pcmpeq, v2i64, VR128,
loadv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
VEX_4V, VEX_WIG;
-}
-let Predicates = [HasAVX2] in {
+
+let Predicates = [HasAVX2, NoVLX] in
defm VPMULLDY : SS48I_binop_rm<0x40, "vpmulld", mul, v8i32, VR256,
loadv4i64, i256mem, 0, SSE_PMULLD_ITINS>,
VEX_4V, VEX_L, VEX_WIG;
+let Predicates = [HasAVX2] in
defm VPCMPEQQY : SS48I_binop_rm<0x29, "vpcmpeqq", X86pcmpeq, v4i64, VR256,
loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
VEX_4V, VEX_L, VEX_WIG;
-}
let Constraints = "$src1 = $dst" in {
defm PMULLD : SS48I_binop_rm<0x40, "pmulld", mul, v4i32, VR128,
diff --git a/contrib/llvm/lib/Target/X86/X86InstructionSelector.cpp b/contrib/llvm/lib/Target/X86/X86InstructionSelector.cpp
index 38f7bc0af5c7..d65eb1de8d09 100644
--- a/contrib/llvm/lib/Target/X86/X86InstructionSelector.cpp
+++ b/contrib/llvm/lib/Target/X86/X86InstructionSelector.cpp
@@ -65,8 +65,8 @@ private:
MachineFunction &MF) const;
bool selectLoadStoreOp(MachineInstr &I, MachineRegisterInfo &MRI,
MachineFunction &MF) const;
- bool selectFrameIndex(MachineInstr &I, MachineRegisterInfo &MRI,
- MachineFunction &MF) const;
+ bool selectFrameIndexOrGep(MachineInstr &I, MachineRegisterInfo &MRI,
+ MachineFunction &MF) const;
bool selectConstant(MachineInstr &I, MachineRegisterInfo &MRI,
MachineFunction &MF) const;
bool selectTrunc(MachineInstr &I, MachineRegisterInfo &MRI,
@@ -235,7 +235,7 @@ bool X86InstructionSelector::select(MachineInstr &I) const {
return true;
if (selectLoadStoreOp(I, MRI, MF))
return true;
- if (selectFrameIndex(I, MRI, MF))
+ if (selectFrameIndexOrGep(I, MRI, MF))
return true;
if (selectConstant(I, MRI, MF))
return true;
@@ -427,27 +427,37 @@ bool X86InstructionSelector::selectLoadStoreOp(MachineInstr &I,
return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}
-bool X86InstructionSelector::selectFrameIndex(MachineInstr &I,
- MachineRegisterInfo &MRI,
- MachineFunction &MF) const {
- if (I.getOpcode() != TargetOpcode::G_FRAME_INDEX)
+bool X86InstructionSelector::selectFrameIndexOrGep(MachineInstr &I,
+ MachineRegisterInfo &MRI,
+ MachineFunction &MF) const {
+ unsigned Opc = I.getOpcode();
+
+ if (Opc != TargetOpcode::G_FRAME_INDEX && Opc != TargetOpcode::G_GEP)
return false;
const unsigned DefReg = I.getOperand(0).getReg();
LLT Ty = MRI.getType(DefReg);
- // Use LEA to calculate frame index.
+ // Use LEA to calculate frame index and GEP
unsigned NewOpc;
if (Ty == LLT::pointer(0, 64))
NewOpc = X86::LEA64r;
else if (Ty == LLT::pointer(0, 32))
NewOpc = STI.isTarget64BitILP32() ? X86::LEA64_32r : X86::LEA32r;
else
- llvm_unreachable("Can't select G_FRAME_INDEX, unsupported type.");
+ llvm_unreachable("Can't select G_FRAME_INDEX/G_GEP, unsupported type.");
I.setDesc(TII.get(NewOpc));
MachineInstrBuilder MIB(MF, I);
- addOffset(MIB, 0);
+
+ if (Opc == TargetOpcode::G_FRAME_INDEX) {
+ addOffset(MIB, 0);
+ } else {
+ MachineOperand &InxOp = I.getOperand(2);
+ I.addOperand(InxOp); // set IndexReg
+ InxOp.ChangeToImmediate(1); // set Scale
+ MIB.addImm(0).addReg(0);
+ }
return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}
diff --git a/contrib/llvm/lib/Target/X86/X86LegalizerInfo.cpp b/contrib/llvm/lib/Target/X86/X86LegalizerInfo.cpp
index a437f6bf4714..4f5e70414aa9 100644
--- a/contrib/llvm/lib/Target/X86/X86LegalizerInfo.cpp
+++ b/contrib/llvm/lib/Target/X86/X86LegalizerInfo.cpp
@@ -34,6 +34,11 @@ X86LegalizerInfo::X86LegalizerInfo(const X86Subtarget &STI,
setLegalizerInfo64bit();
setLegalizerInfoSSE1();
setLegalizerInfoSSE2();
+ setLegalizerInfoSSE41();
+ setLegalizerInfoAVX2();
+ setLegalizerInfoAVX512();
+ setLegalizerInfoAVX512DQ();
+ setLegalizerInfoAVX512BW();
computeTables();
}
@@ -50,7 +55,7 @@ void X86LegalizerInfo::setLegalizerInfo32bit() {
const LLT s32 = LLT::scalar(32);
const LLT s64 = LLT::scalar(64);
- for (unsigned BinOp : {G_ADD, G_SUB})
+ for (unsigned BinOp : {G_ADD, G_SUB, G_MUL})
for (auto Ty : {s8, s16, s32})
setAction({BinOp, Ty}, Legal);
@@ -65,6 +70,12 @@ void X86LegalizerInfo::setLegalizerInfo32bit() {
// Pointer-handling
setAction({G_FRAME_INDEX, p0}, Legal);
+ setAction({G_GEP, p0}, Legal);
+ setAction({G_GEP, 1, s32}, Legal);
+
+ for (auto Ty : {s1, s8, s16})
+ setAction({G_GEP, 1, Ty}, WidenScalar);
+
// Constants
for (auto Ty : {s8, s16, s32, p0})
setAction({TargetOpcode::G_CONSTANT, Ty}, Legal);
@@ -94,7 +105,7 @@ void X86LegalizerInfo::setLegalizerInfo64bit() {
const LLT s32 = LLT::scalar(32);
const LLT s64 = LLT::scalar(64);
- for (unsigned BinOp : {G_ADD, G_SUB})
+ for (unsigned BinOp : {G_ADD, G_SUB, G_MUL})
for (auto Ty : {s8, s16, s32, s64})
setAction({BinOp, Ty}, Legal);
@@ -109,6 +120,13 @@ void X86LegalizerInfo::setLegalizerInfo64bit() {
// Pointer-handling
setAction({G_FRAME_INDEX, p0}, Legal);
+ setAction({G_GEP, p0}, Legal);
+ setAction({G_GEP, 1, s32}, Legal);
+ setAction({G_GEP, 1, s64}, Legal);
+
+ for (auto Ty : {s1, s8, s16})
+ setAction({G_GEP, 1, Ty}, WidenScalar);
+
// Constants
for (auto Ty : {s8, s16, s32, s64, p0})
setAction({TargetOpcode::G_CONSTANT, Ty}, Legal);
@@ -149,6 +167,7 @@ void X86LegalizerInfo::setLegalizerInfoSSE2() {
return;
const LLT s64 = LLT::scalar(64);
+ const LLT v8s16 = LLT::vector(8, 16);
const LLT v4s32 = LLT::vector(4, 32);
const LLT v2s64 = LLT::vector(2, 64);
@@ -159,4 +178,83 @@ void X86LegalizerInfo::setLegalizerInfoSSE2() {
for (unsigned BinOp : {G_ADD, G_SUB})
for (auto Ty : {v4s32})
setAction({BinOp, Ty}, Legal);
+
+ setAction({G_MUL, v8s16}, Legal);
+}
+
+void X86LegalizerInfo::setLegalizerInfoSSE41() {
+ if (!Subtarget.hasSSE41())
+ return;
+
+ const LLT v4s32 = LLT::vector(4, 32);
+
+ setAction({G_MUL, v4s32}, Legal);
+}
+
+void X86LegalizerInfo::setLegalizerInfoAVX2() {
+ if (!Subtarget.hasAVX2())
+ return;
+
+ const LLT v16s16 = LLT::vector(16, 16);
+ const LLT v8s32 = LLT::vector(8, 32);
+
+ for (auto Ty : {v16s16, v8s32})
+ setAction({G_MUL, Ty}, Legal);
+}
+
+void X86LegalizerInfo::setLegalizerInfoAVX512() {
+ if (!Subtarget.hasAVX512())
+ return;
+
+ const LLT v16s32 = LLT::vector(16, 32);
+
+ setAction({G_MUL, v16s32}, Legal);
+
+ /************ VLX *******************/
+ if (!Subtarget.hasVLX())
+ return;
+
+ const LLT v4s32 = LLT::vector(4, 32);
+ const LLT v8s32 = LLT::vector(8, 32);
+
+ for (auto Ty : {v4s32, v8s32})
+ setAction({G_MUL, Ty}, Legal);
+}
+
+void X86LegalizerInfo::setLegalizerInfoAVX512DQ() {
+ if (!(Subtarget.hasAVX512() && Subtarget.hasDQI()))
+ return;
+
+ const LLT v8s64 = LLT::vector(8, 64);
+
+ setAction({G_MUL, v8s64}, Legal);
+
+ /************ VLX *******************/
+ if (!Subtarget.hasVLX())
+ return;
+
+ const LLT v2s64 = LLT::vector(2, 64);
+ const LLT v4s64 = LLT::vector(4, 64);
+
+ for (auto Ty : {v2s64, v4s64})
+ setAction({G_MUL, Ty}, Legal);
+}
+
+void X86LegalizerInfo::setLegalizerInfoAVX512BW() {
+ if (!(Subtarget.hasAVX512() && Subtarget.hasBWI()))
+ return;
+
+ const LLT v32s16 = LLT::vector(32, 16);
+
+ setAction({G_MUL, v32s16}, Legal);
+
+ /************ VLX *******************/
+ if (!Subtarget.hasVLX())
+ return;
+
+ const LLT v8s16 = LLT::vector(8, 16);
+ const LLT v16s16 = LLT::vector(16, 16);
+
+ for (auto Ty : {v8s16, v16s16})
+ setAction({G_MUL, Ty}, Legal);
}
diff --git a/contrib/llvm/lib/Target/X86/X86LegalizerInfo.h b/contrib/llvm/lib/Target/X86/X86LegalizerInfo.h
index 3f00898b4232..ab5405a70427 100644
--- a/contrib/llvm/lib/Target/X86/X86LegalizerInfo.h
+++ b/contrib/llvm/lib/Target/X86/X86LegalizerInfo.h
@@ -38,6 +38,11 @@ private:
void setLegalizerInfo64bit();
void setLegalizerInfoSSE1();
void setLegalizerInfoSSE2();
+ void setLegalizerInfoSSE41();
+ void setLegalizerInfoAVX2();
+ void setLegalizerInfoAVX512();
+ void setLegalizerInfoAVX512DQ();
+ void setLegalizerInfoAVX512BW();
};
} // namespace llvm
#endif
diff --git a/contrib/llvm/lib/Target/X86/X86MCInstLower.cpp b/contrib/llvm/lib/Target/X86/X86MCInstLower.cpp
index 550e3543a71e..598d88d8b9c3 100644
--- a/contrib/llvm/lib/Target/X86/X86MCInstLower.cpp
+++ b/contrib/llvm/lib/Target/X86/X86MCInstLower.cpp
@@ -1040,6 +1040,83 @@ void X86AsmPrinter::LowerPATCHPOINT(const MachineInstr &MI,
getSubtargetInfo());
}
+void X86AsmPrinter::LowerPATCHABLE_EVENT_CALL(const MachineInstr &MI,
+ X86MCInstLower &MCIL) {
+ assert(Subtarget->is64Bit() && "XRay custom events only suports X86-64");
+
+ // We want to emit the following pattern, which follows the x86 calling
+ // convention to prepare for the trampoline call to be patched in.
+ //
+ // <args placement according SysV64 calling convention>
+ // .p2align 1, ...
+ // .Lxray_event_sled_N:
+ // jmp +N // jump across the call instruction
+ // callq __xray_CustomEvent // force relocation to symbol
+ // <args cleanup, jump to here>
+ //
+ // The relative jump needs to jump forward 24 bytes:
+ // 10 (args) + 5 (nops) + 9 (cleanup)
+ //
+ // After patching, it would look something like:
+ //
+ // nopw (2-byte nop)
+ // callq __xrayCustomEvent // already lowered
+ //
+ // ---
+ // First we emit the label and the jump.
+ auto CurSled = OutContext.createTempSymbol("xray_event_sled_", true);
+ OutStreamer->AddComment("# XRay Custom Event Log");
+ OutStreamer->EmitCodeAlignment(2);
+ OutStreamer->EmitLabel(CurSled);
+
+ // Use a two-byte `jmp`. This version of JMP takes an 8-bit relative offset as
+ // an operand (computed as an offset from the jmp instruction).
+ // FIXME: Find another less hacky way do force the relative jump.
+ OutStreamer->EmitBytes("\xeb\x14");
+
+ // The default C calling convention will place two arguments into %rcx and
+ // %rdx -- so we only work with those.
+ unsigned UsedRegs[] = {X86::RDI, X86::RSI, X86::RAX};
+
+ // Because we will use %rax, we preserve that across the call.
+ EmitAndCountInstruction(MCInstBuilder(X86::PUSH64r).addReg(X86::RAX));
+
+ // Then we put the operands in the %rdi and %rsi registers.
+ for (unsigned I = 0; I < MI.getNumOperands(); ++I)
+ if (auto Op = MCIL.LowerMachineOperand(&MI, MI.getOperand(I))) {
+ if (Op->isImm())
+ EmitAndCountInstruction(MCInstBuilder(X86::MOV64ri)
+ .addReg(UsedRegs[I])
+ .addImm(Op->getImm()));
+ else if (Op->isReg()) {
+ if (Op->getReg() != UsedRegs[I])
+ EmitAndCountInstruction(MCInstBuilder(X86::MOV64rr)
+ .addReg(UsedRegs[I])
+ .addReg(Op->getReg()));
+ else
+ EmitNops(*OutStreamer, 3, Subtarget->is64Bit(), getSubtargetInfo());
+ }
+ }
+
+ // We emit a hard dependency on the __xray_CustomEvent symbol, which is the
+ // name of the trampoline to be implemented by the XRay runtime. We put this
+ // explicitly in the %rax register.
+ auto TSym = OutContext.getOrCreateSymbol("__xray_CustomEvent");
+ MachineOperand TOp = MachineOperand::CreateMCSymbol(TSym);
+ EmitAndCountInstruction(MCInstBuilder(X86::MOV64ri)
+ .addReg(X86::RAX)
+ .addOperand(MCIL.LowerSymbolOperand(TOp, TSym)));
+
+ // Emit the call instruction.
+ EmitAndCountInstruction(MCInstBuilder(X86::CALL64r).addReg(X86::RAX));
+
+ // Restore caller-saved and used registers.
+ OutStreamer->AddComment("xray custom event end.");
+ EmitAndCountInstruction(MCInstBuilder(X86::POP64r).addReg(X86::RAX));
+
+ recordSled(CurSled, MI, SledKind::CUSTOM_EVENT);
+}
+
void X86AsmPrinter::LowerPATCHABLE_FUNCTION_ENTER(const MachineInstr &MI,
X86MCInstLower &MCIL) {
// We want to emit the following pattern:
@@ -1415,6 +1492,9 @@ void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {
case TargetOpcode::PATCHABLE_TAIL_CALL:
return LowerPATCHABLE_TAIL_CALL(*MI, MCInstLowering);
+
+ case TargetOpcode::PATCHABLE_EVENT_CALL:
+ return LowerPATCHABLE_EVENT_CALL(*MI, MCInstLowering);
case X86::MORESTACK_RET:
EmitAndCountInstruction(MCInstBuilder(getRetOpcode(*Subtarget)));
diff --git a/contrib/llvm/lib/Target/X86/X86OptimizeLEAs.cpp b/contrib/llvm/lib/Target/X86/X86OptimizeLEAs.cpp
index 7be0a7fd4067..aabbf67a16b6 100644
--- a/contrib/llvm/lib/Target/X86/X86OptimizeLEAs.cpp
+++ b/contrib/llvm/lib/Target/X86/X86OptimizeLEAs.cpp
@@ -223,8 +223,6 @@ public:
StringRef getPassName() const override { return "X86 LEA Optimize"; }
- bool doInitialization(Module &M) override;
-
/// \brief Loop over all of the basic blocks, replacing address
/// calculations in load and store instructions, if it's already
/// been calculated by LEA. Also, remove redundant LEAs.
@@ -280,7 +278,6 @@ private:
MachineRegisterInfo *MRI;
const X86InstrInfo *TII;
const X86RegisterInfo *TRI;
- Module *TheModule;
static char ID;
};
@@ -649,11 +646,6 @@ bool OptimizeLEAPass::removeRedundantLEAs(MemOpMap &LEAs) {
return Changed;
}
-bool OptimizeLEAPass::doInitialization(Module &M) {
- TheModule = &M;
- return false;
-}
-
bool OptimizeLEAPass::runOnMachineFunction(MachineFunction &MF) {
bool Changed = false;
diff --git a/contrib/llvm/lib/Target/X86/X86RegisterBankInfo.cpp b/contrib/llvm/lib/Target/X86/X86RegisterBankInfo.cpp
index 0f8a750a0235..efd3df26dd42 100644
--- a/contrib/llvm/lib/Target/X86/X86RegisterBankInfo.cpp
+++ b/contrib/llvm/lib/Target/X86/X86RegisterBankInfo.cpp
@@ -139,8 +139,9 @@ bool X86RegisterBankInfo::getInstrValueMapping(
return true;
}
-RegisterBankInfo::InstructionMapping
-X86RegisterBankInfo::getSameOperandsMapping(const MachineInstr &MI, bool isFP) {
+const RegisterBankInfo::InstructionMapping &
+X86RegisterBankInfo::getSameOperandsMapping(const MachineInstr &MI,
+ bool isFP) const {
const MachineFunction &MF = *MI.getParent()->getParent();
const MachineRegisterInfo &MRI = MF.getRegInfo();
@@ -152,10 +153,10 @@ X86RegisterBankInfo::getSameOperandsMapping(const MachineInstr &MI, bool isFP) {
llvm_unreachable("Unsupported operand mapping yet.");
auto Mapping = getValueMapping(getPartialMappingIdx(Ty, isFP), 3);
- return InstructionMapping{DefaultMappingID, 1, Mapping, NumOperands};
+ return getInstructionMapping(DefaultMappingID, 1, Mapping, NumOperands);
}
-RegisterBankInfo::InstructionMapping
+const RegisterBankInfo::InstructionMapping &
X86RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
const MachineFunction &MF = *MI.getParent()->getParent();
const MachineRegisterInfo &MRI = MF.getRegInfo();
@@ -164,7 +165,7 @@ X86RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
// Try the default logic for non-generic instructions that are either copies
// or already have some operands assigned to banks.
if (!isPreISelGenericOpcode(Opc)) {
- InstructionMapping Mapping = getInstrMappingImpl(MI);
+ const InstructionMapping &Mapping = getInstrMappingImpl(MI);
if (Mapping.isValid())
return Mapping;
}
@@ -193,10 +194,10 @@ X86RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
// Finally construct the computed mapping.
SmallVector<const ValueMapping *, 8> OpdsMapping(NumOperands);
if (!getInstrValueMapping(MI, OpRegBankIdx, OpdsMapping))
- return InstructionMapping();
+ return getInvalidInstructionMapping();
- return InstructionMapping{DefaultMappingID, /* Cost */ 1,
- getOperandsMapping(OpdsMapping), NumOperands};
+ return getInstructionMapping(DefaultMappingID, /* Cost */ 1,
+ getOperandsMapping(OpdsMapping), NumOperands);
}
void X86RegisterBankInfo::applyMappingImpl(
@@ -231,10 +232,10 @@ X86RegisterBankInfo::getInstrAlternativeMappings(const MachineInstr &MI) const {
if (!getInstrValueMapping(MI, OpRegBankIdx, OpdsMapping))
break;
- RegisterBankInfo::InstructionMapping Mapping = InstructionMapping{
- /*ID*/ 1, /*Cost*/ 1, getOperandsMapping(OpdsMapping), NumOperands};
+ const RegisterBankInfo::InstructionMapping &Mapping = getInstructionMapping(
+ /*ID*/ 1, /*Cost*/ 1, getOperandsMapping(OpdsMapping), NumOperands);
InstructionMappings AltMappings;
- AltMappings.emplace_back(std::move(Mapping));
+ AltMappings.push_back(&Mapping);
return AltMappings;
}
default:
diff --git a/contrib/llvm/lib/Target/X86/X86RegisterBankInfo.h b/contrib/llvm/lib/Target/X86/X86RegisterBankInfo.h
index a1e01a9ab949..e227880427f3 100644
--- a/contrib/llvm/lib/Target/X86/X86RegisterBankInfo.h
+++ b/contrib/llvm/lib/Target/X86/X86RegisterBankInfo.h
@@ -46,8 +46,8 @@ private:
/// Get an instruction mapping.
/// \return An InstructionMappings with a statically allocated
/// OperandsMapping.
- static InstructionMapping getSameOperandsMapping(const MachineInstr &MI,
- bool isFP);
+ const InstructionMapping &getSameOperandsMapping(const MachineInstr &MI,
+ bool isFP) const;
/// Track the bank of each instruction operand(register)
static void
@@ -74,7 +74,8 @@ public:
/// See RegisterBankInfo::applyMapping.
void applyMappingImpl(const OperandsMapper &OpdMapper) const override;
- InstructionMapping getInstrMapping(const MachineInstr &MI) const override;
+ const InstructionMapping &
+ getInstrMapping(const MachineInstr &MI) const override;
};
} // namespace llvm
diff --git a/contrib/llvm/lib/Target/X86/X86Subtarget.cpp b/contrib/llvm/lib/Target/X86/X86Subtarget.cpp
index 9ab751e2b002..d66d39dcee17 100644
--- a/contrib/llvm/lib/Target/X86/X86Subtarget.cpp
+++ b/contrib/llvm/lib/Target/X86/X86Subtarget.cpp
@@ -139,12 +139,18 @@ X86Subtarget::classifyGlobalFunctionReference(const GlobalValue *GV,
return X86II::MO_NO_FLAG;
assert(!isTargetCOFF());
+ const Function *F = dyn_cast_or_null<Function>(GV);
- if (isTargetELF())
+ if (isTargetELF()) {
+ if (is64Bit() && F && (CallingConv::X86_RegCall == F->getCallingConv()))
+ // According to psABI, PLT stub clobbers XMM8-XMM15.
+ // In Regcall calling convention those registers are used for passing
+ // parameters. Thus we need to prevent lazy binding in Regcall.
+ return X86II::MO_GOTPCREL;
return X86II::MO_PLT;
+ }
if (is64Bit()) {
- auto *F = dyn_cast_or_null<Function>(GV);
if (F && F->hasFnAttribute(Attribute::NonLazyBind))
// If the function is marked as non-lazy, generate an indirect call
// which loads from the GOT directly. This avoids runtime overhead
diff --git a/contrib/llvm/lib/Target/X86/X86TargetTransformInfo.cpp b/contrib/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
index b742fb472372..f3b619a2956a 100644
--- a/contrib/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
+++ b/contrib/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
@@ -1426,25 +1426,25 @@ int X86TTIImpl::getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
{ ISD::FSQRT, MVT::v4f64, 28 }, // Haswell from http://www.agner.org/
};
static const CostTblEntry AVX1CostTbl[] = {
- { ISD::BITREVERSE, MVT::v4i64, 10 },
- { ISD::BITREVERSE, MVT::v8i32, 10 },
- { ISD::BITREVERSE, MVT::v16i16, 10 },
- { ISD::BITREVERSE, MVT::v32i8, 10 },
+ { ISD::BITREVERSE, MVT::v4i64, 12 }, // 2 x 128-bit Op + extract/insert
+ { ISD::BITREVERSE, MVT::v8i32, 12 }, // 2 x 128-bit Op + extract/insert
+ { ISD::BITREVERSE, MVT::v16i16, 12 }, // 2 x 128-bit Op + extract/insert
+ { ISD::BITREVERSE, MVT::v32i8, 12 }, // 2 x 128-bit Op + extract/insert
{ ISD::BSWAP, MVT::v4i64, 4 },
{ ISD::BSWAP, MVT::v8i32, 4 },
{ ISD::BSWAP, MVT::v16i16, 4 },
- { ISD::CTLZ, MVT::v4i64, 46 },
- { ISD::CTLZ, MVT::v8i32, 36 },
- { ISD::CTLZ, MVT::v16i16, 28 },
- { ISD::CTLZ, MVT::v32i8, 18 },
- { ISD::CTPOP, MVT::v4i64, 14 },
- { ISD::CTPOP, MVT::v8i32, 22 },
- { ISD::CTPOP, MVT::v16i16, 18 },
- { ISD::CTPOP, MVT::v32i8, 12 },
- { ISD::CTTZ, MVT::v4i64, 20 },
- { ISD::CTTZ, MVT::v8i32, 28 },
- { ISD::CTTZ, MVT::v16i16, 24 },
- { ISD::CTTZ, MVT::v32i8, 18 },
+ { ISD::CTLZ, MVT::v4i64, 48 }, // 2 x 128-bit Op + extract/insert
+ { ISD::CTLZ, MVT::v8i32, 38 }, // 2 x 128-bit Op + extract/insert
+ { ISD::CTLZ, MVT::v16i16, 30 }, // 2 x 128-bit Op + extract/insert
+ { ISD::CTLZ, MVT::v32i8, 20 }, // 2 x 128-bit Op + extract/insert
+ { ISD::CTPOP, MVT::v4i64, 16 }, // 2 x 128-bit Op + extract/insert
+ { ISD::CTPOP, MVT::v8i32, 24 }, // 2 x 128-bit Op + extract/insert
+ { ISD::CTPOP, MVT::v16i16, 20 }, // 2 x 128-bit Op + extract/insert
+ { ISD::CTPOP, MVT::v32i8, 14 }, // 2 x 128-bit Op + extract/insert
+ { ISD::CTTZ, MVT::v4i64, 22 }, // 2 x 128-bit Op + extract/insert
+ { ISD::CTTZ, MVT::v8i32, 30 }, // 2 x 128-bit Op + extract/insert
+ { ISD::CTTZ, MVT::v16i16, 26 }, // 2 x 128-bit Op + extract/insert
+ { ISD::CTTZ, MVT::v32i8, 20 }, // 2 x 128-bit Op + extract/insert
{ ISD::FSQRT, MVT::f32, 14 }, // SNB from http://www.agner.org/
{ ISD::FSQRT, MVT::v4f32, 14 }, // SNB from http://www.agner.org/
{ ISD::FSQRT, MVT::v8f32, 28 }, // SNB from http://www.agner.org/
diff --git a/contrib/llvm/lib/Target/XCore/XCoreISelLowering.cpp b/contrib/llvm/lib/Target/XCore/XCoreISelLowering.cpp
index 4d3ecf25dc34..b8742683a0c8 100644
--- a/contrib/llvm/lib/Target/XCore/XCoreISelLowering.cpp
+++ b/contrib/llvm/lib/Target/XCore/XCoreISelLowering.cpp
@@ -1825,7 +1825,7 @@ void XCoreTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
const APInt &DemandedElts,
const SelectionDAG &DAG,
unsigned Depth) const {
- Known.Zero.clearAllBits(); Known.One.clearAllBits();
+ Known.resetAll();
switch (Op.getOpcode()) {
default: break;
case XCoreISD::LADD:
diff --git a/contrib/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp b/contrib/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp
index 6408cad08d55..d8cf8d3f5da2 100644
--- a/contrib/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp
@@ -247,7 +247,7 @@ doPromotion(Function *F, SmallPtrSetImpl<Argument *> &ArgsToPromote,
if (!ArgIndex.second.empty()) {
Ops.reserve(ArgIndex.second.size());
Type *ElTy = V->getType();
- for (unsigned long II : ArgIndex.second) {
+ for (auto II : ArgIndex.second) {
// Use i32 to index structs, and i64 for others (pointers/arrays).
// This satisfies GEP constraints.
Type *IdxTy =
diff --git a/contrib/llvm/lib/Transforms/IPO/FunctionImport.cpp b/contrib/llvm/lib/Transforms/IPO/FunctionImport.cpp
index c7ef2494e3b8..7ed07d63c627 100644
--- a/contrib/llvm/lib/Transforms/IPO/FunctionImport.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/FunctionImport.cpp
@@ -117,7 +117,7 @@ namespace {
/// - [insert you fancy metric here]
static const GlobalValueSummary *
selectCallee(const ModuleSummaryIndex &Index,
- const GlobalValueSummaryList &CalleeSummaryList,
+ ArrayRef<std::unique_ptr<GlobalValueSummary>> CalleeSummaryList,
unsigned Threshold, StringRef CallerModulePath) {
auto It = llvm::find_if(
CalleeSummaryList,
@@ -168,19 +168,6 @@ selectCallee(const ModuleSummaryIndex &Index,
return cast<GlobalValueSummary>(It->get());
}
-/// Return the summary for the function \p GUID that fits the \p Threshold, or
-/// null if there's no match.
-static const GlobalValueSummary *selectCallee(GlobalValue::GUID GUID,
- unsigned Threshold,
- const ModuleSummaryIndex &Index,
- StringRef CallerModulePath) {
- auto CalleeSummaryList = Index.findGlobalValueSummaryList(GUID);
- if (CalleeSummaryList == Index.end())
- return nullptr; // This function does not have a summary
- return selectCallee(Index, CalleeSummaryList->second, Threshold,
- CallerModulePath);
-}
-
using EdgeInfo = std::tuple<const FunctionSummary *, unsigned /* Threshold */,
GlobalValue::GUID>;
@@ -194,19 +181,23 @@ static void computeImportForFunction(
FunctionImporter::ImportMapTy &ImportList,
StringMap<FunctionImporter::ExportSetTy> *ExportLists = nullptr) {
for (auto &Edge : Summary.calls()) {
- auto GUID = Edge.first.getGUID();
- DEBUG(dbgs() << " edge -> " << GUID << " Threshold:" << Threshold << "\n");
+ ValueInfo VI = Edge.first;
+ DEBUG(dbgs() << " edge -> " << VI.getGUID() << " Threshold:" << Threshold
+ << "\n");
- if (Index.findGlobalValueSummaryList(GUID) == Index.end()) {
+ if (VI.getSummaryList().empty()) {
// For SamplePGO, the indirect call targets for local functions will
// have its original name annotated in profile. We try to find the
// corresponding PGOFuncName as the GUID.
- GUID = Index.getGUIDFromOriginalID(GUID);
+ auto GUID = Index.getGUIDFromOriginalID(VI.getGUID());
if (GUID == 0)
continue;
+ VI = Index.getValueInfo(GUID);
+ if (!VI)
+ continue;
}
- if (DefinedGVSummaries.count(GUID)) {
+ if (DefinedGVSummaries.count(VI.getGUID())) {
DEBUG(dbgs() << "ignored! Target already in destination module.\n");
continue;
}
@@ -222,8 +213,8 @@ static void computeImportForFunction(
const auto NewThreshold =
Threshold * GetBonusMultiplier(Edge.second.Hotness);
- auto *CalleeSummary =
- selectCallee(GUID, NewThreshold, Index, Summary.modulePath());
+ auto *CalleeSummary = selectCallee(Index, VI.getSummaryList(), NewThreshold,
+ Summary.modulePath());
if (!CalleeSummary) {
DEBUG(dbgs() << "ignored! No qualifying callee with summary found.\n");
continue;
@@ -255,7 +246,7 @@ static void computeImportForFunction(
const auto AdjThreshold = GetAdjustedThreshold(Threshold, IsHotCallsite);
auto ExportModulePath = ResolvedCalleeSummary->modulePath();
- auto &ProcessedThreshold = ImportList[ExportModulePath][GUID];
+ auto &ProcessedThreshold = ImportList[ExportModulePath][VI.getGUID()];
/// Since the traversal of the call graph is DFS, we can revisit a function
/// a second time with a higher threshold. In this case, it is added back to
/// the worklist with the new threshold.
@@ -271,7 +262,7 @@ static void computeImportForFunction(
// Make exports in the source module.
if (ExportLists) {
auto &ExportList = (*ExportLists)[ExportModulePath];
- ExportList.insert(GUID);
+ ExportList.insert(VI.getGUID());
if (!PreviouslyImported) {
// This is the first time this function was exported from its source
// module, so mark all functions and globals it references as exported
@@ -291,7 +282,7 @@ static void computeImportForFunction(
}
// Insert the newly imported function to the worklist.
- Worklist.emplace_back(ResolvedCalleeSummary, AdjThreshold, GUID);
+ Worklist.emplace_back(ResolvedCalleeSummary, AdjThreshold, VI.getGUID());
}
}
@@ -431,57 +422,56 @@ DenseSet<GlobalValue::GUID> llvm::computeDeadSymbols(
if (GUIDPreservedSymbols.empty())
// Don't do anything when nothing is live, this is friendly with tests.
return DenseSet<GlobalValue::GUID>();
- DenseSet<GlobalValue::GUID> LiveSymbols = GUIDPreservedSymbols;
- SmallVector<GlobalValue::GUID, 128> Worklist;
- Worklist.reserve(LiveSymbols.size() * 2);
- for (auto GUID : LiveSymbols) {
- DEBUG(dbgs() << "Live root: " << GUID << "\n");
- Worklist.push_back(GUID);
+ DenseSet<ValueInfo> LiveSymbols;
+ SmallVector<ValueInfo, 128> Worklist;
+ Worklist.reserve(GUIDPreservedSymbols.size() * 2);
+ for (auto GUID : GUIDPreservedSymbols) {
+ ValueInfo VI = Index.getValueInfo(GUID);
+ if (!VI)
+ continue;
+ DEBUG(dbgs() << "Live root: " << VI.getGUID() << "\n");
+ LiveSymbols.insert(VI);
+ Worklist.push_back(VI);
}
// Add values flagged in the index as live roots to the worklist.
for (const auto &Entry : Index) {
bool IsLiveRoot = llvm::any_of(
- Entry.second,
+ Entry.second.SummaryList,
[&](const std::unique_ptr<llvm::GlobalValueSummary> &Summary) {
return Summary->liveRoot();
});
if (!IsLiveRoot)
continue;
DEBUG(dbgs() << "Live root (summary): " << Entry.first << "\n");
- Worklist.push_back(Entry.first);
+ Worklist.push_back(ValueInfo(&Entry));
}
while (!Worklist.empty()) {
- auto GUID = Worklist.pop_back_val();
- auto It = Index.findGlobalValueSummaryList(GUID);
- if (It == Index.end()) {
- DEBUG(dbgs() << "Not in index: " << GUID << "\n");
- continue;
- }
+ auto VI = Worklist.pop_back_val();
// FIXME: we should only make the prevailing copy live here
- for (auto &Summary : It->second) {
+ for (auto &Summary : VI.getSummaryList()) {
for (auto Ref : Summary->refs()) {
- auto RefGUID = Ref.getGUID();
- if (LiveSymbols.insert(RefGUID).second) {
- DEBUG(dbgs() << "Marking live (ref): " << RefGUID << "\n");
- Worklist.push_back(RefGUID);
+ if (LiveSymbols.insert(Ref).second) {
+ DEBUG(dbgs() << "Marking live (ref): " << Ref.getGUID() << "\n");
+ Worklist.push_back(Ref);
}
}
if (auto *FS = dyn_cast<FunctionSummary>(Summary.get())) {
for (auto Call : FS->calls()) {
- auto CallGUID = Call.first.getGUID();
- if (LiveSymbols.insert(CallGUID).second) {
- DEBUG(dbgs() << "Marking live (call): " << CallGUID << "\n");
- Worklist.push_back(CallGUID);
+ if (LiveSymbols.insert(Call.first).second) {
+ DEBUG(dbgs() << "Marking live (call): " << Call.first.getGUID()
+ << "\n");
+ Worklist.push_back(Call.first);
}
}
}
if (auto *AS = dyn_cast<AliasSummary>(Summary.get())) {
auto AliaseeGUID = AS->getAliasee().getOriginalName();
- if (LiveSymbols.insert(AliaseeGUID).second) {
+ ValueInfo AliaseeVI = Index.getValueInfo(AliaseeGUID);
+ if (AliaseeVI && LiveSymbols.insert(AliaseeVI).second) {
DEBUG(dbgs() << "Marking live (alias): " << AliaseeGUID << "\n");
- Worklist.push_back(AliaseeGUID);
+ Worklist.push_back(AliaseeVI);
}
}
}
@@ -490,10 +480,9 @@ DenseSet<GlobalValue::GUID> llvm::computeDeadSymbols(
DeadSymbols.reserve(
std::min(Index.size(), Index.size() - LiveSymbols.size()));
for (auto &Entry : Index) {
- auto GUID = Entry.first;
- if (!LiveSymbols.count(GUID)) {
- DEBUG(dbgs() << "Marking dead: " << GUID << "\n");
- DeadSymbols.insert(GUID);
+ if (!LiveSymbols.count(ValueInfo(&Entry))) {
+ DEBUG(dbgs() << "Marking dead: " << Entry.first << "\n");
+ DeadSymbols.insert(Entry.first);
}
}
DEBUG(dbgs() << LiveSymbols.size() << " symbols Live, and "
@@ -825,7 +814,7 @@ static bool doImportingForModule(Module &M) {
// is only enabled when testing importing via the 'opt' tool, which does
// not do the ThinLink that would normally determine what values to promote.
for (auto &I : *Index) {
- for (auto &S : I.second) {
+ for (auto &S : I.second.SummaryList) {
if (GlobalValue::isLocalLinkage(S->linkage()))
S->setLinkage(GlobalValue::ExternalLinkage);
}
diff --git a/contrib/llvm/lib/Transforms/IPO/LowerTypeTests.cpp b/contrib/llvm/lib/Transforms/IPO/LowerTypeTests.cpp
index 785207efbe5c..ca4ee92f971a 100644
--- a/contrib/llvm/lib/Transforms/IPO/LowerTypeTests.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/LowerTypeTests.cpp
@@ -1440,7 +1440,7 @@ bool LowerTypeTestsModule::lower() {
}
for (auto &P : *ExportSummary) {
- for (auto &S : P.second) {
+ for (auto &S : P.second.SummaryList) {
auto *FS = dyn_cast<FunctionSummary>(S.get());
if (!FS)
continue;
diff --git a/contrib/llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp b/contrib/llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp
index cb7d487b68b0..aae22c5457ba 100644
--- a/contrib/llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp
@@ -1322,7 +1322,7 @@ bool DevirtModule::run() {
}
for (auto &P : *ExportSummary) {
- for (auto &S : P.second) {
+ for (auto &S : P.second.SummaryList) {
auto *FS = dyn_cast<FunctionSummary>(S.get());
if (!FS)
continue;
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
index 4f1f19499768..153a186d5ed4 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
@@ -847,29 +847,49 @@ Value *FAddCombine::createAddendVal(const FAddend &Opnd, bool &NeedNeg) {
return createFMul(OpndVal, Coeff.getValue(Instr->getType()));
}
-// If one of the operands only has one non-zero bit, and if the other
-// operand has a known-zero bit in a more significant place than it (not
-// including the sign bit) the ripple may go up to and fill the zero, but
-// won't change the sign. For example, (X & ~4) + 1.
-static bool checkRippleForAdd(const APInt &Op0KnownZero,
- const APInt &Op1KnownZero) {
- APInt Op1MaybeOne = ~Op1KnownZero;
- // Make sure that one of the operand has at most one bit set to 1.
- if (Op1MaybeOne.countPopulation() != 1)
- return false;
-
- // Find the most significant known 0 other than the sign bit.
- int BitWidth = Op0KnownZero.getBitWidth();
- APInt Op0KnownZeroTemp(Op0KnownZero);
- Op0KnownZeroTemp.clearSignBit();
- int Op0ZeroPosition = BitWidth - Op0KnownZeroTemp.countLeadingZeros() - 1;
-
- int Op1OnePosition = BitWidth - Op1MaybeOne.countLeadingZeros() - 1;
- assert(Op1OnePosition >= 0);
-
- // This also covers the case of no known zero, since in that case
- // Op0ZeroPosition is -1.
- return Op0ZeroPosition >= Op1OnePosition;
+/// \brief Return true if we can prove that adding the two values of the
+/// knownbits will not overflow.
+/// Otherwise return false.
+static bool checkRippleForAdd(const KnownBits &LHSKnown,
+ const KnownBits &RHSKnown) {
+ // Addition of two 2's complement numbers having opposite signs will never
+ // overflow.
+ if ((LHSKnown.isNegative() && RHSKnown.isNonNegative()) ||
+ (LHSKnown.isNonNegative() && RHSKnown.isNegative()))
+ return true;
+
+ // If either of the values is known to be non-negative, adding them can only
+ // overflow if the second is also non-negative, so we can assume that.
+ // Two non-negative numbers will only overflow if there is a carry to the
+ // sign bit, so we can check if even when the values are as big as possible
+ // there is no overflow to the sign bit.
+ if (LHSKnown.isNonNegative() || RHSKnown.isNonNegative()) {
+ APInt MaxLHS = ~LHSKnown.Zero;
+ MaxLHS.clearSignBit();
+ APInt MaxRHS = ~RHSKnown.Zero;
+ MaxRHS.clearSignBit();
+ APInt Result = std::move(MaxLHS) + std::move(MaxRHS);
+ return Result.isSignBitClear();
+ }
+
+ // If either of the values is known to be negative, adding them can only
+ // overflow if the second is also negative, so we can assume that.
+ // Two negative number will only overflow if there is no carry to the sign
+ // bit, so we can check if even when the values are as small as possible
+ // there is overflow to the sign bit.
+ if (LHSKnown.isNegative() || RHSKnown.isNegative()) {
+ APInt MinLHS = LHSKnown.One;
+ MinLHS.clearSignBit();
+ APInt MinRHS = RHSKnown.One;
+ MinRHS.clearSignBit();
+ APInt Result = std::move(MinLHS) + std::move(MinRHS);
+ return Result.isSignBitSet();
+ }
+
+ // If we reached here it means that we know nothing about the sign bits.
+ // In this case we can't know if there will be an overflow, since by
+ // changing the sign bits any two values can be made to overflow.
+ return false;
}
/// Return true if we can prove that:
@@ -906,16 +926,8 @@ bool InstCombiner::WillNotOverflowSignedAdd(Value *LHS, Value *RHS,
KnownBits RHSKnown(BitWidth);
computeKnownBits(RHS, RHSKnown, 0, &CxtI);
- // Addition of two 2's complement numbers having opposite signs will never
- // overflow.
- if ((LHSKnown.One[BitWidth - 1] && RHSKnown.Zero[BitWidth - 1]) ||
- (LHSKnown.Zero[BitWidth - 1] && RHSKnown.One[BitWidth - 1]))
- return true;
-
// Check if carry bit of addition will not cause overflow.
- if (checkRippleForAdd(LHSKnown.Zero, RHSKnown.Zero))
- return true;
- if (checkRippleForAdd(RHSKnown.Zero, LHSKnown.Zero))
+ if (checkRippleForAdd(LHSKnown, RHSKnown))
return true;
return false;
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
index c7092bf3a398..b114801cc1c0 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -1834,25 +1834,8 @@ Value *InstCombiner::FoldOrOfICmps(ICmpInst *LHS, ICmpInst *RHS,
case ICmpInst::ICMP_UGT: // (X == 13 | X u> 14) -> no change
case ICmpInst::ICMP_SGT: // (X == 13 | X s> 14) -> no change
break;
- case ICmpInst::ICMP_NE: // (X == 13 | X != 15) -> X != 15
- case ICmpInst::ICMP_ULT: // (X == 13 | X u< 15) -> X u< 15
- case ICmpInst::ICMP_SLT: // (X == 13 | X s< 15) -> X s< 15
- return RHS;
}
break;
- case ICmpInst::ICMP_NE:
- switch (PredR) {
- default:
- llvm_unreachable("Unknown integer condition code!");
- case ICmpInst::ICMP_EQ: // (X != 13 | X == 15) -> X != 13
- case ICmpInst::ICMP_UGT: // (X != 13 | X u> 15) -> X != 13
- case ICmpInst::ICMP_SGT: // (X != 13 | X s> 15) -> X != 13
- return LHS;
- case ICmpInst::ICMP_NE: // (X != 13 | X != 15) -> true
- case ICmpInst::ICMP_ULT: // (X != 13 | X u< 15) -> true
- case ICmpInst::ICMP_SLT: // (X != 13 | X s< 15) -> true
- return Builder->getTrue();
- }
case ICmpInst::ICMP_ULT:
switch (PredR) {
default:
@@ -1860,15 +1843,9 @@ Value *InstCombiner::FoldOrOfICmps(ICmpInst *LHS, ICmpInst *RHS,
case ICmpInst::ICMP_EQ: // (X u< 13 | X == 14) -> no change
break;
case ICmpInst::ICMP_UGT: // (X u< 13 | X u> 15) -> (X-13) u> 2
- // If RHSC is [us]MAXINT, it is always false. Not handling
- // this can cause overflow.
- if (RHSC->isMaxValue(false))
- return LHS;
+ assert(!RHSC->isMaxValue(false) && "Missed icmp simplification");
return insertRangeTest(LHS0, LHSC->getValue(), RHSC->getValue() + 1,
false, false);
- case ICmpInst::ICMP_NE: // (X u< 13 | X != 15) -> X != 15
- case ICmpInst::ICMP_ULT: // (X u< 13 | X u< 15) -> X u< 15
- return RHS;
}
break;
case ICmpInst::ICMP_SLT:
@@ -1878,39 +1855,9 @@ Value *InstCombiner::FoldOrOfICmps(ICmpInst *LHS, ICmpInst *RHS,
case ICmpInst::ICMP_EQ: // (X s< 13 | X == 14) -> no change
break;
case ICmpInst::ICMP_SGT: // (X s< 13 | X s> 15) -> (X-13) s> 2
- // If RHSC is [us]MAXINT, it is always false. Not handling
- // this can cause overflow.
- if (RHSC->isMaxValue(true))
- return LHS;
+ assert(!RHSC->isMaxValue(true) && "Missed icmp simplification");
return insertRangeTest(LHS0, LHSC->getValue(), RHSC->getValue() + 1, true,
false);
- case ICmpInst::ICMP_NE: // (X s< 13 | X != 15) -> X != 15
- case ICmpInst::ICMP_SLT: // (X s< 13 | X s< 15) -> X s< 15
- return RHS;
- }
- break;
- case ICmpInst::ICMP_UGT:
- switch (PredR) {
- default:
- llvm_unreachable("Unknown integer condition code!");
- case ICmpInst::ICMP_EQ: // (X u> 13 | X == 15) -> X u> 13
- case ICmpInst::ICMP_UGT: // (X u> 13 | X u> 15) -> X u> 13
- return LHS;
- case ICmpInst::ICMP_NE: // (X u> 13 | X != 15) -> true
- case ICmpInst::ICMP_ULT: // (X u> 13 | X u< 15) -> true
- return Builder->getTrue();
- }
- break;
- case ICmpInst::ICMP_SGT:
- switch (PredR) {
- default:
- llvm_unreachable("Unknown integer condition code!");
- case ICmpInst::ICMP_EQ: // (X s> 13 | X == 15) -> X > 13
- case ICmpInst::ICMP_SGT: // (X s> 13 | X s> 15) -> X > 13
- return LHS;
- case ICmpInst::ICMP_NE: // (X s> 13 | X != 15) -> true
- case ICmpInst::ICMP_SLT: // (X s> 13 | X s< 15) -> true
- return Builder->getTrue();
}
break;
}
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
index 4fd90d78a63b..6989d67f0060 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -3619,7 +3619,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
// then this one is redundant, and should be removed.
KnownBits Known(1);
computeKnownBits(IIOperand, Known, 0, II);
- if (Known.One.isAllOnesValue())
+ if (Known.isAllOnes())
return eraseInstFromFunction(*II);
// Update the cache of affected values for this assumption (we might be
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
index 60970775de63..34ce235b3fe2 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -4050,7 +4050,7 @@ Instruction *InstCombiner::foldICmpUsingKnownBits(ICmpInst &I) {
// is set. If the comparison is against zero, then this is a check to see if
// *that* bit is set.
APInt Op0KnownZeroInverted = ~Op0Known.Zero;
- if (~Op1Known.Zero == 0) {
+ if (Op1Known.isZero()) {
// If the LHS is an AND with the same constant, look through it.
Value *LHS = nullptr;
const APInt *LHSC;
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
index 0195c5e727c9..05b01774cd5e 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
@@ -120,8 +120,7 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
return nullptr;
}
- Known.Zero.clearAllBits();
- Known.One.clearAllBits();
+ Known.resetAll();
if (DemandedMask == 0) // Not demanding any bits from V.
return UndefValue::get(VTy);
@@ -329,13 +328,11 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
case Instruction::Trunc: {
unsigned truncBf = I->getOperand(0)->getType()->getScalarSizeInBits();
DemandedMask = DemandedMask.zext(truncBf);
- Known.Zero = Known.Zero.zext(truncBf);
- Known.One = Known.One.zext(truncBf);
+ Known = Known.zext(truncBf);
if (SimplifyDemandedBits(I, 0, DemandedMask, Known, Depth + 1))
return I;
DemandedMask = DemandedMask.trunc(BitWidth);
- Known.Zero = Known.Zero.trunc(BitWidth);
- Known.One = Known.One.trunc(BitWidth);
+ Known = Known.trunc(BitWidth);
assert(!(Known.Zero & Known.One) && "Bits known to be one AND zero?");
break;
}
@@ -365,13 +362,11 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
unsigned SrcBitWidth =I->getOperand(0)->getType()->getScalarSizeInBits();
DemandedMask = DemandedMask.trunc(SrcBitWidth);
- Known.Zero = Known.Zero.trunc(SrcBitWidth);
- Known.One = Known.One.trunc(SrcBitWidth);
+ Known = Known.trunc(SrcBitWidth);
if (SimplifyDemandedBits(I, 0, DemandedMask, Known, Depth + 1))
return I;
DemandedMask = DemandedMask.zext(BitWidth);
- Known.Zero = Known.Zero.zext(BitWidth);
- Known.One = Known.One.zext(BitWidth);
+ Known = Known.zext(BitWidth);
assert(!(Known.Zero & Known.One) && "Bits known to be one AND zero?");
// The top bits are known to be zero.
Known.Zero.setBitsFrom(SrcBitWidth);
@@ -391,13 +386,11 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
InputDemandedBits.setBit(SrcBitWidth-1);
InputDemandedBits = InputDemandedBits.trunc(SrcBitWidth);
- Known.Zero = Known.Zero.trunc(SrcBitWidth);
- Known.One = Known.One.trunc(SrcBitWidth);
+ Known = Known.trunc(SrcBitWidth);
if (SimplifyDemandedBits(I, 0, InputDemandedBits, Known, Depth + 1))
return I;
InputDemandedBits = InputDemandedBits.zext(BitWidth);
- Known.Zero = Known.Zero.zext(BitWidth);
- Known.One = Known.One.zext(BitWidth);
+ Known = Known.zext(BitWidth);
assert(!(Known.Zero & Known.One) && "Bits known to be one AND zero?");
// If the sign bit of the input is known set or clear, then we know the
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
index 1eb98b18bfb5..1792cb585f87 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -2182,8 +2182,8 @@ Instruction *InstCombiner::visitReturnInst(ReturnInst &RI) {
// determine the value. If so, constant fold it.
KnownBits Known(VTy->getPrimitiveSizeInBits());
computeKnownBits(ResultOp, Known, 0, &RI);
- if ((Known.Zero|Known.One).isAllOnesValue())
- RI.setOperand(0, Constant::getIntegerValue(VTy, Known.One));
+ if (Known.isConstant())
+ RI.setOperand(0, Constant::getIntegerValue(VTy, Known.getConstant()));
return nullptr;
}
@@ -2863,8 +2863,8 @@ bool InstCombiner::run() {
unsigned BitWidth = Ty->getScalarSizeInBits();
KnownBits Known(BitWidth);
computeKnownBits(I, Known, /*Depth*/0, I);
- if ((Known.Zero | Known.One).isAllOnesValue()) {
- Constant *C = ConstantInt::get(Ty, Known.One);
+ if (Known.isConstant()) {
+ Constant *C = ConstantInt::get(Ty, Known.getConstant());
DEBUG(dbgs() << "IC: ConstFold (all bits known) to: " << *C <<
" from: " << *I << '\n');
diff --git a/contrib/llvm/lib/Transforms/Instrumentation/IndirectCallPromotion.cpp b/contrib/llvm/lib/Transforms/Instrumentation/IndirectCallPromotion.cpp
index 493d014586c6..96027bc3d0a9 100644
--- a/contrib/llvm/lib/Transforms/Instrumentation/IndirectCallPromotion.cpp
+++ b/contrib/llvm/lib/Transforms/Instrumentation/IndirectCallPromotion.cpp
@@ -70,13 +70,13 @@ static cl::opt<bool> DisableICP("disable-icp", cl::init(false), cl::Hidden,
// For debug use only.
static cl::opt<unsigned>
ICPCutOff("icp-cutoff", cl::init(0), cl::Hidden, cl::ZeroOrMore,
- cl::desc("Max number of promotions for this compilaiton"));
+ cl::desc("Max number of promotions for this compilation"));
// If ICPCSSkip is non zero, the first ICPCSSkip callsites will be skipped.
// For debug use only.
static cl::opt<unsigned>
ICPCSSkip("icp-csskip", cl::init(0), cl::Hidden, cl::ZeroOrMore,
- cl::desc("Skip Callsite up to this number for this compilaiton"));
+ cl::desc("Skip Callsite up to this number for this compilation"));
// Set if the pass is called in LTO optimization. The difference for LTO mode
// is the pass won't prefix the source module name to the internal linkage
diff --git a/contrib/llvm/lib/Transforms/Instrumentation/SanitizerCoverage.cpp b/contrib/llvm/lib/Transforms/Instrumentation/SanitizerCoverage.cpp
index 8bdd917a0596..4bc0a7133118 100644
--- a/contrib/llvm/lib/Transforms/Instrumentation/SanitizerCoverage.cpp
+++ b/contrib/llvm/lib/Transforms/Instrumentation/SanitizerCoverage.cpp
@@ -151,6 +151,7 @@ SanitizerCoverageOptions OverrideFromCL(SanitizerCoverageOptions Options) {
Options.TraceGep |= ClGEPTracing;
Options.TracePC |= ClExperimentalTracePC;
Options.TracePCGuard |= ClTracePCGuard;
+ Options.NoPrune |= !ClPruneBlocks;
return Options;
}
@@ -380,8 +381,10 @@ static bool isFullPostDominator(const BasicBlock *BB,
return true;
}
-static bool shouldInstrumentBlock(const Function& F, const BasicBlock *BB, const DominatorTree *DT,
- const PostDominatorTree *PDT) {
+static bool shouldInstrumentBlock(const Function &F, const BasicBlock *BB,
+ const DominatorTree *DT,
+ const PostDominatorTree *PDT,
+ const SanitizerCoverageOptions &Options) {
// Don't insert coverage for unreachable blocks: we will never call
// __sanitizer_cov() for them, so counting them in
// NumberOfInstrumentedBlocks() might complicate calculation of code coverage
@@ -395,7 +398,7 @@ static bool shouldInstrumentBlock(const Function& F, const BasicBlock *BB, const
if (BB->getFirstInsertionPt() == BB->end())
return false;
- if (!ClPruneBlocks || &F.getEntryBlock() == BB)
+ if (Options.NoPrune || &F.getEntryBlock() == BB)
return true;
return !(isFullDominator(BB, DT) || isFullPostDominator(BB, PDT));
@@ -434,7 +437,7 @@ bool SanitizerCoverageModule::runOnFunction(Function &F) {
&getAnalysis<PostDominatorTreeWrapperPass>(F).getPostDomTree();
for (auto &BB : F) {
- if (shouldInstrumentBlock(F, &BB, DT, PDT))
+ if (shouldInstrumentBlock(F, &BB, DT, PDT, Options))
BlocksToInstrument.push_back(&BB);
for (auto &Inst : BB) {
if (Options.IndirectCalls) {
diff --git a/contrib/llvm/lib/Transforms/Scalar/Float2Int.cpp b/contrib/llvm/lib/Transforms/Scalar/Float2Int.cpp
index 8a5af6195f1b..b105ece8dc7c 100644
--- a/contrib/llvm/lib/Transforms/Scalar/Float2Int.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/Float2Int.cpp
@@ -137,13 +137,13 @@ void Float2IntPass::findRoots(Function &F, SmallPtrSet<Instruction*,8> &Roots) {
}
// Helper - mark I as having been traversed, having range R.
-ConstantRange Float2IntPass::seen(Instruction *I, ConstantRange R) {
+void Float2IntPass::seen(Instruction *I, ConstantRange R) {
DEBUG(dbgs() << "F2I: " << *I << ":" << R << "\n");
- if (SeenInsts.find(I) != SeenInsts.end())
- SeenInsts.find(I)->second = R;
+ auto IT = SeenInsts.find(I);
+ if (IT != SeenInsts.end())
+ IT->second = std::move(R);
else
- SeenInsts.insert(std::make_pair(I, R));
- return R;
+ SeenInsts.insert(std::make_pair(I, std::move(R)));
}
// Helper - get a range representing a poison value.
diff --git a/contrib/llvm/lib/Transforms/Scalar/JumpThreading.cpp b/contrib/llvm/lib/Transforms/Scalar/JumpThreading.cpp
index 7dacaba1193e..ae353ea44595 100644
--- a/contrib/llvm/lib/Transforms/Scalar/JumpThreading.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/JumpThreading.cpp
@@ -580,17 +580,17 @@ bool JumpThreadingPass::ComputeValueKnownInPredecessors(
// If comparing a live-in value against a constant, see if we know the
// live-in value on any predecessors.
- if (isa<Constant>(Cmp->getOperand(1)) && Cmp->getType()->isIntegerTy()) {
+ if (isa<Constant>(Cmp->getOperand(1)) && !Cmp->getType()->isVectorTy()) {
+ Constant *CmpConst = cast<Constant>(Cmp->getOperand(1));
+
if (!isa<Instruction>(Cmp->getOperand(0)) ||
cast<Instruction>(Cmp->getOperand(0))->getParent() != BB) {
- Constant *RHSCst = cast<Constant>(Cmp->getOperand(1));
-
for (BasicBlock *P : predecessors(BB)) {
// If the value is known by LazyValueInfo to be a constant in a
// predecessor, use that information to try to thread this block.
LazyValueInfo::Tristate Res =
LVI->getPredicateOnEdge(Cmp->getPredicate(), Cmp->getOperand(0),
- RHSCst, P, BB, CxtI ? CxtI : Cmp);
+ CmpConst, P, BB, CxtI ? CxtI : Cmp);
if (Res == LazyValueInfo::Unknown)
continue;
@@ -603,21 +603,19 @@ bool JumpThreadingPass::ComputeValueKnownInPredecessors(
// Try to find a constant value for the LHS of a comparison,
// and evaluate it statically if we can.
- if (Constant *CmpConst = dyn_cast<Constant>(Cmp->getOperand(1))) {
- PredValueInfoTy LHSVals;
- ComputeValueKnownInPredecessors(I->getOperand(0), BB, LHSVals,
- WantInteger, CxtI);
-
- for (const auto &LHSVal : LHSVals) {
- Constant *V = LHSVal.first;
- Constant *Folded = ConstantExpr::getCompare(Cmp->getPredicate(),
- V, CmpConst);
- if (Constant *KC = getKnownConstant(Folded, WantInteger))
- Result.push_back(std::make_pair(KC, LHSVal.second));
- }
+ PredValueInfoTy LHSVals;
+ ComputeValueKnownInPredecessors(I->getOperand(0), BB, LHSVals,
+ WantInteger, CxtI);
- return !Result.empty();
+ for (const auto &LHSVal : LHSVals) {
+ Constant *V = LHSVal.first;
+ Constant *Folded = ConstantExpr::getCompare(Cmp->getPredicate(),
+ V, CmpConst);
+ if (Constant *KC = getKnownConstant(Folded, WantInteger))
+ Result.push_back(std::make_pair(KC, LHSVal.second));
}
+
+ return !Result.empty();
}
}
diff --git a/contrib/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp b/contrib/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
index 410fbb03068f..48d5ae88cda9 100644
--- a/contrib/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
@@ -783,6 +783,11 @@ bool LoopIdiomRecognize::processLoopStridedStore(
if (NegStride)
Start = getStartForNegStride(Start, BECount, IntPtr, StoreSize, SE);
+ // TODO: ideally we should still be able to generate memset if SCEV expander
+ // is taught to generate the dependencies at the latest point.
+ if (!isSafeToExpand(Start, *SE))
+ return false;
+
// Okay, we have a strided store "p[i]" of a splattable value. We can turn
// this into a memset in the loop preheader now if we want. However, this
// would be unsafe to do if there is anything else in the loop that may read
@@ -814,6 +819,11 @@ bool LoopIdiomRecognize::processLoopStridedStore(
SCEV::FlagNUW);
}
+ // TODO: ideally we should still be able to generate memset if SCEV expander
+ // is taught to generate the dependencies at the latest point.
+ if (!isSafeToExpand(NumBytesS, *SE))
+ return false;
+
Value *NumBytes =
Expander.expandCodeFor(NumBytesS, IntPtr, Preheader->getTerminator());
diff --git a/contrib/llvm/lib/Transforms/Scalar/NewGVN.cpp b/contrib/llvm/lib/Transforms/Scalar/NewGVN.cpp
index 62b5d80d611b..3c9850b156ac 100644
--- a/contrib/llvm/lib/Transforms/Scalar/NewGVN.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/NewGVN.cpp
@@ -2494,12 +2494,11 @@ void NewGVN::verifyMemoryCongruency() const {
continue;
if (CC->getStoreCount() != 0) {
assert((CC->getStoredValue() || !isa<StoreInst>(CC->getLeader())) &&
- "Any class with a store as a "
- "leader should have a "
- "representative stored value\n");
+ "Any class with a store as a leader should have a "
+ "representative stored value");
assert(CC->getMemoryLeader() &&
- "Any congruence class with a store should "
- "have a representative access\n");
+ "Any congruence class with a store should have a "
+ "representative access");
}
if (CC->getMemoryLeader())
diff --git a/contrib/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp b/contrib/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp
index a6b9fee1d8ac..bf54a51c7635 100644
--- a/contrib/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp
@@ -492,11 +492,10 @@ static CallInst *findTRECandidate(Instruction *TI,
return CI;
}
-static bool eliminateRecursiveTailCall(CallInst *CI, ReturnInst *Ret,
- BasicBlock *&OldEntry,
- bool &TailCallsAreMarkedTail,
- SmallVectorImpl<PHINode *> &ArgumentPHIs,
- bool CannotTailCallElimCallsMarkedTail) {
+static bool
+eliminateRecursiveTailCall(CallInst *CI, ReturnInst *Ret, BasicBlock *&OldEntry,
+ bool &TailCallsAreMarkedTail,
+ SmallVectorImpl<PHINode *> &ArgumentPHIs) {
// If we are introducing accumulator recursion to eliminate operations after
// the call instruction that are both associative and commutative, the initial
// value for the accumulator is placed in this variable. If this value is set
@@ -707,8 +706,7 @@ static bool foldReturnAndProcessPred(BasicBlock *BB, ReturnInst *Ret,
BB->eraseFromParent();
eliminateRecursiveTailCall(CI, RI, OldEntry, TailCallsAreMarkedTail,
- ArgumentPHIs,
- CannotTailCallElimCallsMarkedTail);
+ ArgumentPHIs);
++NumRetDuped;
Change = true;
}
@@ -727,8 +725,7 @@ static bool processReturningBlock(ReturnInst *Ret, BasicBlock *&OldEntry,
return false;
return eliminateRecursiveTailCall(CI, Ret, OldEntry, TailCallsAreMarkedTail,
- ArgumentPHIs,
- CannotTailCallElimCallsMarkedTail);
+ ArgumentPHIs);
}
static bool eliminateTailRecursion(Function &F, const TargetTransformInfo *TTI) {
diff --git a/contrib/llvm/lib/Transforms/Utils/BuildLibCalls.cpp b/contrib/llvm/lib/Transforms/Utils/BuildLibCalls.cpp
index 1956697ccb8b..ebde1f9a17dd 100644
--- a/contrib/llvm/lib/Transforms/Utils/BuildLibCalls.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/BuildLibCalls.cpp
@@ -113,6 +113,7 @@ bool llvm::inferLibFuncAttributes(Function &F, const TargetLibraryInfo &TLI) {
bool Changed = false;
switch (TheLibFunc) {
case LibFunc_strlen:
+ case LibFunc_wcslen:
Changed |= setOnlyReadsMemory(F);
Changed |= setDoesNotThrow(F);
Changed |= setDoesNotCapture(F, 0);
diff --git a/contrib/llvm/lib/Transforms/Utils/SimplifyCFG.cpp b/contrib/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
index 7a3e8b9ae915..b44bc74d6551 100644
--- a/contrib/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
@@ -596,7 +596,7 @@ private:
Span = Span.inverse();
// If there are a ton of values, we don't want to make a ginormous switch.
- if (Span.getSetSize().ugt(8) || Span.isEmptySet()) {
+ if (Span.isSizeLargerThan(8) || Span.isEmptySet()) {
return false;
}
diff --git a/contrib/llvm/lib/Transforms/Utils/ValueMapper.cpp b/contrib/llvm/lib/Transforms/Utils/ValueMapper.cpp
index f77c10b6dd47..84d89f103a2f 100644
--- a/contrib/llvm/lib/Transforms/Utils/ValueMapper.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/ValueMapper.cpp
@@ -121,6 +121,8 @@ public:
void addFlags(RemapFlags Flags);
+ void remapGlobalObjectMetadata(GlobalObject &GO);
+
Value *mapValue(const Value *V);
void remapInstruction(Instruction *I);
void remapFunction(Function &F);
@@ -802,6 +804,7 @@ void Mapper::flush() {
switch (E.Kind) {
case WorklistEntry::MapGlobalInit:
E.Data.GVInit.GV->setInitializer(mapConstant(E.Data.GVInit.Init));
+ remapGlobalObjectMetadata(*E.Data.GVInit.GV);
break;
case WorklistEntry::MapAppendingVar: {
unsigned PrefixSize = AppendingInits.size() - E.AppendingGVNumNewMembers;
@@ -892,6 +895,14 @@ void Mapper::remapInstruction(Instruction *I) {
I->mutateType(TypeMapper->remapType(I->getType()));
}
+void Mapper::remapGlobalObjectMetadata(GlobalObject &GO) {
+ SmallVector<std::pair<unsigned, MDNode *>, 8> MDs;
+ GO.getAllMetadata(MDs);
+ GO.clearMetadata();
+ for (const auto &I : MDs)
+ GO.addMetadata(I.first, *cast<MDNode>(mapMetadata(I.second)));
+}
+
void Mapper::remapFunction(Function &F) {
// Remap the operands.
for (Use &Op : F.operands())
@@ -899,11 +910,7 @@ void Mapper::remapFunction(Function &F) {
Op = mapValue(Op);
// Remap the metadata attachments.
- SmallVector<std::pair<unsigned, MDNode *>, 8> MDs;
- F.getAllMetadata(MDs);
- F.clearMetadata();
- for (const auto &I : MDs)
- F.addMetadata(I.first, *cast<MDNode>(mapMetadata(I.second)));
+ remapGlobalObjectMetadata(F);
// Remap the argument types.
if (TypeMapper)
diff --git a/contrib/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/contrib/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index 87ce0194dad6..3fde0a453962 100644
--- a/contrib/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/contrib/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -7178,7 +7178,7 @@ unsigned LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I,
if (VF == 1) {
Type *ValTy = getMemInstValueType(I);
unsigned Alignment = getMemInstAlignment(I);
- unsigned AS = getMemInstAlignment(I);
+ unsigned AS = getMemInstAddressSpace(I);
return TTI.getAddressComputationCost(ValTy) +
TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS, I);
diff --git a/contrib/llvm/tools/clang/include/clang/AST/ODRHash.h b/contrib/llvm/tools/clang/include/clang/AST/ODRHash.h
index 9af8488fca10..e4cc12d35891 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/ODRHash.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/ODRHash.h
@@ -25,7 +25,7 @@ namespace clang {
class Decl;
class IdentifierInfo;
-class NestedNameSpecifer;
+class NestedNameSpecifier;
class Stmt;
class TemplateParameterList;
diff --git a/contrib/llvm/tools/clang/include/clang/ASTMatchers/ASTMatchers.h b/contrib/llvm/tools/clang/include/clang/ASTMatchers/ASTMatchers.h
index f11469b8fc20..0ab8d5fe4fc1 100644
--- a/contrib/llvm/tools/clang/include/clang/ASTMatchers/ASTMatchers.h
+++ b/contrib/llvm/tools/clang/include/clang/ASTMatchers/ASTMatchers.h
@@ -1223,6 +1223,20 @@ AST_MATCHER_P(InitListExpr, hasSyntacticForm,
InnerMatcher.matches(*SyntForm, Finder, Builder));
}
+/// \brief Matches C++ initializer list expressions.
+///
+/// Given
+/// \code
+/// std::vector<int> a({ 1, 2, 3 });
+/// std::vector<int> b = { 4, 5 };
+/// int c[] = { 6, 7 };
+/// std::pair<int, int> d = { 8, 9 };
+/// \endcode
+/// cxxStdInitializerListExpr()
+/// matches "{ 1, 2, 3 }" and "{ 4, 5 }"
+const internal::VariadicDynCastAllOfMatcher<Stmt,
+ CXXStdInitializerListExpr> cxxStdInitializerListExpr;
+
/// \brief Matches implicit initializers of init list expressions.
///
/// Given
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/Attr.td b/contrib/llvm/tools/clang/include/clang/Basic/Attr.td
index 04a948a6c46e..3eeeb1bdc971 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/Attr.td
+++ b/contrib/llvm/tools/clang/include/clang/Basic/Attr.td
@@ -864,6 +864,13 @@ def OpenCLUnrollHint : InheritableAttr {
let Documentation = [OpenCLUnrollHintDocs];
}
+def OpenCLIntelReqdSubGroupSize: InheritableAttr {
+ let Spellings = [GNU<"intel_reqd_sub_group_size">];
+ let Args = [UnsignedArgument<"SubGroupSize">];
+ let Subjects = SubjectList<[Function], ErrorDiag>;
+ let Documentation = [OpenCLIntelReqdSubGroupSizeDocs];
+}
+
// This attribute is both a type attribute, and a declaration attribute (for
// parameter variables).
def OpenCLAccess : Attr {
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/AttrDocs.td b/contrib/llvm/tools/clang/include/clang/Basic/AttrDocs.td
index be2a91515ae8..9e2fdf4834aa 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/AttrDocs.td
+++ b/contrib/llvm/tools/clang/include/clang/Basic/AttrDocs.td
@@ -2216,6 +2216,21 @@ s6.11.5 for details.
}];
}
+def OpenCLIntelReqdSubGroupSizeDocs : Documentation {
+ let Category = DocCatStmt;
+ let Heading = "__attribute__((intel_reqd_sub_group_size))";
+ let Content = [{
+The optional attribute intel_reqd_sub_group_size can be used to indicate that
+the kernel must be compiled and executed with the specified subgroup size. When
+this attribute is present, get_max_sub_group_size() is guaranteed to return the
+specified integer value. This is important for the correctness of many subgroup
+algorithms, and in some cases may be used by the compiler to generate more optimal
+code. See `cl_intel_required_subgroup_size
+<https://www.khronos.org/registry/OpenCL/extensions/intel/cl_intel_required_subgroup_size.txt>`
+for details.
+ }];
+}
+
def OpenCLAccessDocs : Documentation {
let Category = DocCatStmt;
let Heading = "__read_only, __write_only, __read_write (read_only, write_only, read_write)";
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/BuiltinsARM.def b/contrib/llvm/tools/clang/include/clang/Basic/BuiltinsARM.def
index 6cc7308d4cbc..e8db347d4be5 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/BuiltinsARM.def
+++ b/contrib/llvm/tools/clang/include/clang/Basic/BuiltinsARM.def
@@ -25,11 +25,93 @@
// In libgcc
BUILTIN(__clear_cache, "vv*v*", "i")
+// 16-bit multiplications
+BUILTIN(__builtin_arm_smulbb, "iii", "nc")
+BUILTIN(__builtin_arm_smulbt, "iii", "nc")
+BUILTIN(__builtin_arm_smultb, "iii", "nc")
+BUILTIN(__builtin_arm_smultt, "iii", "nc")
+BUILTIN(__builtin_arm_smulwb, "iii", "nc")
+BUILTIN(__builtin_arm_smulwt, "iii", "nc")
+
// Saturating arithmetic
BUILTIN(__builtin_arm_qadd, "iii", "nc")
BUILTIN(__builtin_arm_qsub, "iii", "nc")
BUILTIN(__builtin_arm_ssat, "iiUi", "nc")
-BUILTIN(__builtin_arm_usat, "UiUiUi", "nc")
+BUILTIN(__builtin_arm_usat, "UiiUi", "nc")
+
+BUILTIN(__builtin_arm_smlabb, "iiii", "nc")
+BUILTIN(__builtin_arm_smlabt, "iiii", "nc")
+BUILTIN(__builtin_arm_smlatb, "iiii", "nc")
+BUILTIN(__builtin_arm_smlatt, "iiii", "nc")
+BUILTIN(__builtin_arm_smlawb, "iiii", "nc")
+BUILTIN(__builtin_arm_smlawt, "iiii", "nc")
+
+BUILTIN(__builtin_arm_ssat16, "iii", "nc")
+BUILTIN(__builtin_arm_usat16, "iii", "nc")
+
+BUILTIN(__builtin_arm_sxtab16, "iii", "nc")
+BUILTIN(__builtin_arm_sxtb16, "ii", "nc")
+BUILTIN(__builtin_arm_uxtab16, "iii", "nc")
+BUILTIN(__builtin_arm_uxtb16, "ii", "nc")
+
+BUILTIN(__builtin_arm_sel, "iii", "nc")
+
+BUILTIN(__builtin_arm_qadd8, "iii", "nc")
+BUILTIN(__builtin_arm_qsub8, "iii", "nc")
+BUILTIN(__builtin_arm_sadd8, "iii", "nc")
+BUILTIN(__builtin_arm_shadd8, "iii", "nc")
+BUILTIN(__builtin_arm_shsub8, "iii", "nc")
+BUILTIN(__builtin_arm_ssub8, "iii", "nc")
+BUILTIN(__builtin_arm_uadd8, "UiUiUi", "nc")
+BUILTIN(__builtin_arm_uhadd8, "UiUiUi", "nc")
+BUILTIN(__builtin_arm_uhsub8, "UiUiUi", "nc")
+BUILTIN(__builtin_arm_uqadd8, "UiUiUi", "nc")
+BUILTIN(__builtin_arm_uqsub8, "UiUiUi", "nc")
+BUILTIN(__builtin_arm_usub8, "UiUiUi", "nc")
+
+// Sum of 8-bit absolute differences
+BUILTIN(__builtin_arm_usad8, "UiUiUi", "nc")
+BUILTIN(__builtin_arm_usada8, "UiUiUiUi", "nc")
+
+// Parallel 16-bit addition and subtraction
+BUILTIN(__builtin_arm_qadd16, "iii", "nc")
+BUILTIN(__builtin_arm_qasx, "iii", "nc")
+BUILTIN(__builtin_arm_qsax, "iii", "nc")
+BUILTIN(__builtin_arm_qsub16, "iii", "nc")
+BUILTIN(__builtin_arm_sadd16, "iii", "nc")
+BUILTIN(__builtin_arm_sasx, "iii", "nc")
+BUILTIN(__builtin_arm_shadd16, "iii", "nc")
+BUILTIN(__builtin_arm_shasx, "iii", "nc")
+BUILTIN(__builtin_arm_shsax, "iii", "nc")
+BUILTIN(__builtin_arm_shsub16, "iii", "nc")
+BUILTIN(__builtin_arm_ssax, "iii", "nc")
+BUILTIN(__builtin_arm_ssub16, "iii", "nc")
+BUILTIN(__builtin_arm_uadd16, "UiUiUi", "nc")
+BUILTIN(__builtin_arm_uasx, "UiUiUi", "nc")
+BUILTIN(__builtin_arm_uhadd16, "UiUiUi", "nc")
+BUILTIN(__builtin_arm_uhasx, "UiUiUi", "nc")
+BUILTIN(__builtin_arm_uhsax, "UiUiUi", "nc")
+BUILTIN(__builtin_arm_uhsub16, "UiUiUi", "nc")
+BUILTIN(__builtin_arm_uqadd16, "UiUiUi", "nc")
+BUILTIN(__builtin_arm_uqasx, "UiUiUi", "nc")
+BUILTIN(__builtin_arm_uqsax, "UiUiUi", "nc")
+BUILTIN(__builtin_arm_uqsub16, "UiUiUi", "nc")
+BUILTIN(__builtin_arm_usax, "UiUiUi", "nc")
+BUILTIN(__builtin_arm_usub16, "UiUiUi", "nc")
+
+// Parallel 16-bit multiplication
+BUILTIN(__builtin_arm_smlad, "iiii", "nc")
+BUILTIN(__builtin_arm_smladx, "iiii", "nc")
+BUILTIN(__builtin_arm_smlald, "LLiiiLLi", "nc")
+BUILTIN(__builtin_arm_smlaldx, "LLiiiLLi", "nc")
+BUILTIN(__builtin_arm_smlsd, "iiii", "nc")
+BUILTIN(__builtin_arm_smlsdx, "iiii", "nc")
+BUILTIN(__builtin_arm_smlsld, "LLiiiLLi", "nc")
+BUILTIN(__builtin_arm_smlsldx, "LLiiiLLi", "nc")
+BUILTIN(__builtin_arm_smuad, "iii", "nc")
+BUILTIN(__builtin_arm_smuadx, "iii", "nc")
+BUILTIN(__builtin_arm_smusd, "iii", "nc")
+BUILTIN(__builtin_arm_smusdx, "iii", "nc")
// Bit manipulation
BUILTIN(__builtin_arm_rbit, "UiUi", "nc")
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/BuiltinsX86.def b/contrib/llvm/tools/clang/include/clang/Basic/BuiltinsX86.def
index c8a3c2f4d3ab..68b868ce8e6e 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/BuiltinsX86.def
+++ b/contrib/llvm/tools/clang/include/clang/Basic/BuiltinsX86.def
@@ -668,6 +668,12 @@ TARGET_BUILTIN(__builtin_ia32_pext_si, "UiUiUi", "", "bmi2")
// TBM
TARGET_BUILTIN(__builtin_ia32_bextri_u32, "UiUiIUi", "", "tbm")
+// LWP
+TARGET_BUILTIN(__builtin_ia32_llwpcb, "vv*", "", "lwp")
+TARGET_BUILTIN(__builtin_ia32_slwpcb, "v*", "", "lwp")
+TARGET_BUILTIN(__builtin_ia32_lwpins32, "UcUiUiUi", "", "lwp")
+TARGET_BUILTIN(__builtin_ia32_lwpval32, "vUiUiUi", "", "lwp")
+
// SHA
TARGET_BUILTIN(__builtin_ia32_sha1rnds4, "V4iV4iV4iIc", "", "sha")
TARGET_BUILTIN(__builtin_ia32_sha1nexte, "V4iV4iV4i", "", "sha")
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/BuiltinsX86_64.def b/contrib/llvm/tools/clang/include/clang/Basic/BuiltinsX86_64.def
index d38f522c3812..2851184c2c84 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/BuiltinsX86_64.def
+++ b/contrib/llvm/tools/clang/include/clang/Basic/BuiltinsX86_64.def
@@ -69,6 +69,8 @@ TARGET_BUILTIN(__builtin_ia32_bzhi_di, "ULLiULLiULLi", "", "bmi2")
TARGET_BUILTIN(__builtin_ia32_pdep_di, "ULLiULLiULLi", "", "bmi2")
TARGET_BUILTIN(__builtin_ia32_pext_di, "ULLiULLiULLi", "", "bmi2")
TARGET_BUILTIN(__builtin_ia32_bextri_u64, "ULLiULLiIULLi", "", "tbm")
+TARGET_BUILTIN(__builtin_ia32_lwpins64, "UcULLiUiUi", "", "lwp")
+TARGET_BUILTIN(__builtin_ia32_lwpval64, "vULLiUiUi", "", "lwp")
TARGET_BUILTIN(__builtin_ia32_pbroadcastq512_gpr_mask, "V8LLiLLiV8LLiUc", "", "avx512f")
TARGET_BUILTIN(__builtin_ia32_pbroadcastq128_gpr_mask, "V2LLiULLiV2LLiUc","","avx512vl")
TARGET_BUILTIN(__builtin_ia32_pbroadcastq256_gpr_mask, "V4LLiULLiV4LLiUc","","avx512vl")
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticLexKinds.td b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticLexKinds.td
index cd284e94303b..77db8993f018 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticLexKinds.td
+++ b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticLexKinds.td
@@ -475,8 +475,6 @@ def warn_pragma_pop_macro_no_push : Warning<
def warn_pragma_message : Warning<"%0">,
InGroup<PoundPragmaMessage>, DefaultWarnNoWerror;
def err_pragma_message : Error<"%0">;
-def err_pragma_module_import_expected_module_name : Error<
- "expected %select{identifier in|'.' or end of directive after}0 module name">;
def warn_pragma_ignored : Warning<"unknown pragma ignored">,
InGroup<UnknownPragmas>, DefaultIgnore;
def ext_stdc_pragma_ignored : ExtWarn<"unknown pragma in STDC namespace">,
@@ -511,6 +509,22 @@ def warn_pragma_debug_unexpected_command : Warning<
"unexpected debug command '%0'">, InGroup<IgnoredPragmas>;
def warn_pragma_debug_missing_argument : Warning<
"missing argument to debug command '%0'">, InGroup<IgnoredPragmas>;
+// #pragma module
+def err_pp_expected_module_name : Error<
+ "expected %select{identifier after '.' in |}0module name">;
+def err_pp_module_begin_wrong_module : Error<
+ "must specify '-fmodule-name=%0' to enter %select{|submodule of }1"
+ "this module%select{ (current module is %3)|}2">;
+def err_pp_module_begin_no_module_map : Error<
+ "no module map available for module %0">;
+def err_pp_module_begin_no_submodule : Error<
+ "submodule %0.%1 not declared in module map">;
+def err_pp_module_begin_without_module_end : Error<
+ "no matching '#pragma clang module end' for this "
+ "'#pragma clang module begin'">;
+def err_pp_module_end_without_module_begin : Error<
+ "no matching '#pragma clang module begin' for this "
+ "'#pragma clang module end'">;
def err_defined_macro_name : Error<"'defined' cannot be used as a macro name">;
def err_paste_at_start : Error<
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticSemaKinds.td b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticSemaKinds.td
index 6a3a2124a5ff..a0c0e5f86449 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticSemaKinds.td
+++ b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticSemaKinds.td
@@ -730,6 +730,9 @@ def err_super_in_lambda_unsupported : Error<
def warn_pragma_unused_undeclared_var : Warning<
"undeclared variable %0 used as an argument for '#pragma unused'">,
InGroup<IgnoredPragmas>;
+def warn_atl_uuid_deprecated : Warning<
+ "specifying 'uuid' as an ATL attribute is deprecated; use __declspec instead">,
+ InGroup<DeprecatedDeclarations>;
def warn_pragma_unused_expected_var_arg : Warning<
"only variables can be arguments to '#pragma unused'">,
InGroup<IgnoredPragmas>;
@@ -2088,7 +2091,7 @@ def err_enum_invalid_underlying : Error<
"non-integral type %0 is an invalid underlying type">;
def err_enumerator_too_large : Error<
"enumerator value is not representable in the underlying type %0">;
-def ext_enumerator_too_large : ExtWarn<
+def ext_enumerator_too_large : Extension<
"enumerator value is not representable in the underlying type %0">,
InGroup<MicrosoftEnumValue>;
def err_enumerator_wrapped : Error<
@@ -2868,7 +2871,8 @@ def warn_partial_availability : Warning<"%0 is only available conditionally">,
def note_partial_availability_silence : Note<
"explicitly redeclare %0 to silence this warning">;
def note_unguarded_available_silence : Note<
- "enclose %0 in an @available check to silence this warning">;
+ "enclose %0 in %select{an @available|a __builtin_available}1 check to silence"
+ " this warning">;
def warn_partial_message : Warning<"%0 is partial: %1">,
InGroup<UnguardedAvailability>, DefaultIgnore;
def warn_partial_fwdclass_message : Warning<
@@ -4759,7 +4763,7 @@ def ext_forward_ref_enum : Extension<
"ISO C forbids forward references to 'enum' types">;
def err_forward_ref_enum : Error<
"ISO C++ forbids forward references to 'enum' types">;
-def ext_ms_forward_ref_enum : Extension<
+def ext_ms_forward_ref_enum : ExtWarn<
"forward references to 'enum' types are a Microsoft extension">,
InGroup<MicrosoftEnumForwardReference>;
def ext_forward_ref_enum_def : Extension<
@@ -7920,7 +7924,11 @@ def warn_empty_switch_body : Warning<
def note_empty_body_on_separate_line : Note<
"put the semicolon on a separate line to silence this warning">;
-def err_va_start_used_in_non_variadic_function : Error<
+def err_va_start_captured_stmt : Error<
+ "'va_start' cannot be used in a captured statement">;
+def err_va_start_outside_function : Error<
+ "'va_start' cannot be used outside a function">;
+def err_va_start_fixed_function : Error<
"'va_start' used in function with fixed args">;
def err_va_start_used_in_wrong_abi_function : Error<
"'va_start' used in %select{System V|Win64}0 ABI function">;
@@ -8297,6 +8305,8 @@ def err_sampler_argument_required : Error<
"sampler_t variable required - got %0">;
def err_wrong_sampler_addressspace: Error<
"sampler type cannot be used with the __local and __global address space qualifiers">;
+def err_opencl_nonconst_global_sampler : Error<
+ "global sampler requires a const or constant address space qualifier">;
def err_opencl_cast_non_zero_to_event_t : Error<
"cannot cast non-zero value '%0' to 'event_t'">;
def err_opencl_global_invalid_addr_space : Error<
@@ -8971,6 +8981,9 @@ def warn_nullability_lost : Warning<
"implicit conversion from nullable pointer %0 to non-nullable pointer "
"type %1">,
InGroup<NullableToNonNullConversion>, DefaultIgnore;
+def warn_zero_as_null_pointer_constant : Warning<
+ "zero as null pointer constant">,
+ InGroup<DiagGroup<"zero-as-null-pointer-constant">>, DefaultIgnore;
def err_nullability_cs_multilevel : Error<
"nullability keyword %0 cannot be applied to multi-level pointer type %1">;
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/CC1Options.td b/contrib/llvm/tools/clang/include/clang/Driver/CC1Options.td
index 63348c094c93..bd2062d967b4 100644
--- a/contrib/llvm/tools/clang/include/clang/Driver/CC1Options.td
+++ b/contrib/llvm/tools/clang/include/clang/Driver/CC1Options.td
@@ -297,6 +297,9 @@ def fsanitize_coverage_trace_pc
def fsanitize_coverage_trace_pc_guard
: Flag<["-"], "fsanitize-coverage-trace-pc-guard">,
HelpText<"Enable PC tracing with guard in sanitizer coverage">;
+def fsanitize_coverage_no_prune
+ : Flag<["-"], "fsanitize-coverage-no-prune">,
+ HelpText<"Disable coverage pruning (i.e. instrument all blocks/edges)">;
def fprofile_instrument_EQ : Joined<["-"], "fprofile-instrument=">,
HelpText<"Enable PGO instrumentation. The accepted value is clang, llvm, "
"or none">;
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/Distro.h b/contrib/llvm/tools/clang/include/clang/Driver/Distro.h
index e2fb8b643350..fab49862a442 100644
--- a/contrib/llvm/tools/clang/include/clang/Driver/Distro.h
+++ b/contrib/llvm/tools/clang/include/clang/Driver/Distro.h
@@ -57,6 +57,7 @@ public:
UbuntuXenial,
UbuntuYakkety,
UbuntuZesty,
+ UbuntuArtful,
UnknownDistro
};
@@ -110,9 +111,9 @@ public:
}
bool IsUbuntu() const {
- return DistroVal >= UbuntuHardy && DistroVal <= UbuntuZesty;
+ return DistroVal >= UbuntuHardy && DistroVal <= UbuntuArtful;
}
-
+
/// @}
};
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/Multilib.h b/contrib/llvm/tools/clang/include/clang/Driver/Multilib.h
index 0419186b745d..36d2493b1afc 100644
--- a/contrib/llvm/tools/clang/include/clang/Driver/Multilib.h
+++ b/contrib/llvm/tools/clang/include/clang/Driver/Multilib.h
@@ -70,13 +70,21 @@ public:
/// All elements begin with either '+' or '-'
const flags_list &flags() const { return Flags; }
flags_list &flags() { return Flags; }
+
/// Add a flag to the flags list
+ /// \p Flag must be a flag accepted by the driver with its leading '-' removed,
+ /// and replaced with either:
+ /// '-' which contraindicates using this multilib with that flag
+ /// or:
+ /// '+' which promotes using this multilib in the presence of that flag
+ /// otherwise '-print-multi-lib' will not emit them correctly.
Multilib &flag(StringRef F) {
assert(F.front() == '+' || F.front() == '-');
Flags.push_back(F);
return *this;
}
+ LLVM_DUMP_METHOD void dump() const;
/// \brief print summary of the Multilib
void print(raw_ostream &OS) const;
@@ -150,6 +158,7 @@ public:
unsigned size() const { return Multilibs.size(); }
+ LLVM_DUMP_METHOD void dump() const;
void print(raw_ostream &OS) const;
MultilibSet &setIncludeDirsCallback(IncludeDirsFunc F) {
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/Options.td b/contrib/llvm/tools/clang/include/clang/Driver/Options.td
index 1272a36ecc70..31015228f362 100644
--- a/contrib/llvm/tools/clang/include/clang/Driver/Options.td
+++ b/contrib/llvm/tools/clang/include/clang/Driver/Options.td
@@ -1688,6 +1688,8 @@ def mllvm : Separate<["-"], "mllvm">, Flags<[CC1Option,CC1AsOption,CoreOption]>,
HelpText<"Additional arguments to forward to LLVM's option processing">;
def mmacosx_version_min_EQ : Joined<["-"], "mmacosx-version-min=">,
Group<m_Group>, HelpText<"Set Mac OS X deployment target">;
+def mmacos_version_min_EQ : Joined<["-"], "mmacos-version-min=">,
+ Group<m_Group>, Alias<mmacosx_version_min_EQ>;
def mms_bitfields : Flag<["-"], "mms-bitfields">, Group<m_Group>, Flags<[CC1Option]>,
HelpText<"Set the default structure layout to be compatible with the Microsoft compiler standard">;
def mno_ms_bitfields : Flag<["-"], "mno-ms-bitfields">, Group<m_Group>,
@@ -1750,6 +1752,7 @@ def mno_bmi : Flag<["-"], "mno-bmi">, Group<m_x86_Features_Group>;
def mno_bmi2 : Flag<["-"], "mno-bmi2">, Group<m_x86_Features_Group>;
def mno_popcnt : Flag<["-"], "mno-popcnt">, Group<m_x86_Features_Group>;
def mno_tbm : Flag<["-"], "mno-tbm">, Group<m_x86_Features_Group>;
+def mno_lwp : Flag<["-"], "mno-lwp">, Group<m_x86_Features_Group>;
def mno_fma4 : Flag<["-"], "mno-fma4">, Group<m_x86_Features_Group>;
def mno_fma : Flag<["-"], "mno-fma">, Group<m_x86_Features_Group>;
def mno_xop : Flag<["-"], "mno-xop">, Group<m_x86_Features_Group>;
@@ -1949,6 +1952,7 @@ def mbmi : Flag<["-"], "mbmi">, Group<m_x86_Features_Group>;
def mbmi2 : Flag<["-"], "mbmi2">, Group<m_x86_Features_Group>;
def mpopcnt : Flag<["-"], "mpopcnt">, Group<m_x86_Features_Group>;
def mtbm : Flag<["-"], "mtbm">, Group<m_x86_Features_Group>;
+def mlwp : Flag<["-"], "mlwp">, Group<m_x86_Features_Group>;
def mfma4 : Flag<["-"], "mfma4">, Group<m_x86_Features_Group>;
def mfma : Flag<["-"], "mfma">, Group<m_x86_Features_Group>;
def mxop : Flag<["-"], "mxop">, Group<m_x86_Features_Group>;
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/CodeGenOptions.def b/contrib/llvm/tools/clang/include/clang/Frontend/CodeGenOptions.def
index 0b7d466ab3d4..7495ad808c99 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/CodeGenOptions.def
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/CodeGenOptions.def
@@ -160,6 +160,7 @@ CODEGENOPT(SanitizeCoverageTracePC, 1, 0) ///< Enable PC tracing
///< in sanitizer coverage.
CODEGENOPT(SanitizeCoverageTracePCGuard, 1, 0) ///< Enable PC tracing with guard
///< in sanitizer coverage.
+CODEGENOPT(SanitizeCoverageNoPrune, 1, 0) ///< Disable coverage pruning.
CODEGENOPT(SanitizeStats , 1, 0) ///< Collect statistics for sanitizers.
CODEGENOPT(SimplifyLibCalls , 1, 1) ///< Set when -fbuiltin is enabled.
CODEGENOPT(SoftFloat , 1, 0) ///< -soft-float.
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/FrontendAction.h b/contrib/llvm/tools/clang/include/clang/Frontend/FrontendAction.h
index 384499a3d82a..8d690a448f85 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/FrontendAction.h
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/FrontendAction.h
@@ -146,6 +146,8 @@ public:
return *CurrentASTUnit;
}
+ Module *getCurrentModule() const;
+
std::unique_ptr<ASTUnit> takeCurrentASTUnit() {
return std::move(CurrentASTUnit);
}
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/HeaderSearch.h b/contrib/llvm/tools/clang/include/clang/Lex/HeaderSearch.h
index 64c6f2a03f2b..6e24e1893ab6 100644
--- a/contrib/llvm/tools/clang/include/clang/Lex/HeaderSearch.h
+++ b/contrib/llvm/tools/clang/include/clang/Lex/HeaderSearch.h
@@ -538,9 +538,15 @@ public:
///
/// \param File The module map file.
/// \param IsSystem Whether this file is in a system header directory.
+ /// \param ID If the module map file is already mapped (perhaps as part of
+ /// processing a preprocessed module), the ID of the file.
+ /// \param Offset [inout] An offset within ID to start parsing. On exit,
+ /// filled by the end of the parsed contents (either EOF or the
+ /// location of an end-of-module-map pragma).
///
/// \returns true if an error occurred, false otherwise.
- bool loadModuleMapFile(const FileEntry *File, bool IsSystem);
+ bool loadModuleMapFile(const FileEntry *File, bool IsSystem,
+ FileID ID = FileID(), unsigned *Offset = nullptr);
/// \brief Collect the set of all known, top-level modules.
///
@@ -686,7 +692,9 @@ private:
LoadModuleMapResult loadModuleMapFileImpl(const FileEntry *File,
bool IsSystem,
- const DirectoryEntry *Dir);
+ const DirectoryEntry *Dir,
+ FileID ID = FileID(),
+ unsigned *Offset = nullptr);
/// \brief Try to load the module map file in the given directory.
///
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/Lexer.h b/contrib/llvm/tools/clang/include/clang/Lex/Lexer.h
index 6ac6316d1248..3be733167e5c 100644
--- a/contrib/llvm/tools/clang/include/clang/Lex/Lexer.h
+++ b/contrib/llvm/tools/clang/include/clang/Lex/Lexer.h
@@ -478,6 +478,11 @@ public:
return getCharAndSizeSlowNoWarn(Ptr, Size, LangOpts);
}
+ /// Returns the leading whitespace for line that corresponds to the given
+ /// location \p Loc.
+ static StringRef getIndentationForLine(SourceLocation Loc,
+ const SourceManager &SM);
+
//===--------------------------------------------------------------------===//
// Internal implementation interfaces.
private:
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/ModuleMap.h b/contrib/llvm/tools/clang/include/clang/Lex/ModuleMap.h
index 46136725d87a..0fd6abe2f7d0 100644
--- a/contrib/llvm/tools/clang/include/clang/Lex/ModuleMap.h
+++ b/contrib/llvm/tools/clang/include/clang/Lex/ModuleMap.h
@@ -546,14 +546,20 @@ public:
/// \param HomeDir The directory in which relative paths within this module
/// map file will be resolved.
///
+ /// \param ID The FileID of the file to process, if we've already entered it.
+ ///
+ /// \param Offset [inout] On input the offset at which to start parsing. On
+ /// output, the offset at which the module map terminated.
+ ///
/// \param ExternModuleLoc The location of the "extern module" declaration
/// that caused us to load this module map file, if any.
///
/// \returns true if an error occurred, false otherwise.
bool parseModuleMapFile(const FileEntry *File, bool IsSystem,
- const DirectoryEntry *HomeDir,
+ const DirectoryEntry *HomeDir, FileID ID = FileID(),
+ unsigned *Offset = nullptr,
SourceLocation ExternModuleLoc = SourceLocation());
-
+
/// \brief Dump the contents of the module map, for debugging purposes.
void dump();
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/Preprocessor.h b/contrib/llvm/tools/clang/include/clang/Lex/Preprocessor.h
index b1a7325c3426..0e3f563785d4 100644
--- a/contrib/llvm/tools/clang/include/clang/Lex/Preprocessor.h
+++ b/contrib/llvm/tools/clang/include/clang/Lex/Preprocessor.h
@@ -324,7 +324,7 @@ class Preprocessor {
/// \brief If the current lexer is for a submodule that is being built, this
/// is that submodule.
- Module *CurSubmodule;
+ Module *CurLexerSubmodule;
/// \brief Keeps track of the stack of files currently
/// \#included, and macros currently being expanded from, not counting
@@ -507,16 +507,19 @@ class Preprocessor {
/// \brief Information about a submodule that we're currently building.
struct BuildingSubmoduleInfo {
- BuildingSubmoduleInfo(Module *M, SourceLocation ImportLoc,
+ BuildingSubmoduleInfo(Module *M, SourceLocation ImportLoc, bool IsPragma,
SubmoduleState *OuterSubmoduleState,
unsigned OuterPendingModuleMacroNames)
- : M(M), ImportLoc(ImportLoc), OuterSubmoduleState(OuterSubmoduleState),
+ : M(M), ImportLoc(ImportLoc), IsPragma(IsPragma),
+ OuterSubmoduleState(OuterSubmoduleState),
OuterPendingModuleMacroNames(OuterPendingModuleMacroNames) {}
/// The module that we are building.
Module *M;
/// The location at which the module was included.
SourceLocation ImportLoc;
+ /// Whether we entered this submodule via a pragma.
+ bool IsPragma;
/// The previous SubmoduleState.
SubmoduleState *OuterSubmoduleState;
/// The number of pending module macro names when we started building this.
@@ -773,8 +776,9 @@ public:
/// expansions going on at the time.
PreprocessorLexer *getCurrentFileLexer() const;
- /// \brief Return the submodule owning the file being lexed.
- Module *getCurrentSubmodule() const { return CurSubmodule; }
+ /// \brief Return the submodule owning the file being lexed. This may not be
+ /// the current module if we have changed modules since entering the file.
+ Module *getCurrentLexerSubmodule() const { return CurLexerSubmodule; }
/// \brief Returns the FileID for the preprocessor predefines.
FileID getPredefinesFileID() const { return PredefinesFileID; }
@@ -1726,13 +1730,16 @@ public:
bool CheckMacroName(Token &MacroNameTok, MacroUse isDefineUndef,
bool *ShadowFlag = nullptr);
-private:
+ void EnterSubmodule(Module *M, SourceLocation ImportLoc, bool ForPragma);
+ Module *LeaveSubmodule(bool ForPragma);
+private:
void PushIncludeMacroStack() {
assert(CurLexerKind != CLK_CachingLexer && "cannot push a caching lexer");
- IncludeMacroStack.emplace_back(
- CurLexerKind, CurSubmodule, std::move(CurLexer), std::move(CurPTHLexer),
- CurPPLexer, std::move(CurTokenLexer), CurDirLookup);
+ IncludeMacroStack.emplace_back(CurLexerKind, CurLexerSubmodule,
+ std::move(CurLexer), std::move(CurPTHLexer),
+ CurPPLexer, std::move(CurTokenLexer),
+ CurDirLookup);
CurPPLexer = nullptr;
}
@@ -1742,16 +1749,13 @@ private:
CurPPLexer = IncludeMacroStack.back().ThePPLexer;
CurTokenLexer = std::move(IncludeMacroStack.back().TheTokenLexer);
CurDirLookup = IncludeMacroStack.back().TheDirLookup;
- CurSubmodule = IncludeMacroStack.back().TheSubmodule;
+ CurLexerSubmodule = IncludeMacroStack.back().TheSubmodule;
CurLexerKind = IncludeMacroStack.back().CurLexerKind;
IncludeMacroStack.pop_back();
}
void PropagateLineStartLeadingSpaceInfo(Token &Result);
- void EnterSubmodule(Module *M, SourceLocation ImportLoc);
- void LeaveSubmodule();
-
/// Determine whether we need to create module macros for #defines in the
/// current context.
bool needModuleMacros() const;
@@ -1967,7 +1971,6 @@ public:
void HandlePragmaPoison();
void HandlePragmaSystemHeader(Token &SysHeaderTok);
void HandlePragmaDependency(Token &DependencyTok);
- void HandlePragmaModuleImport(Token &Tok);
void HandlePragmaPushMacro(Token &Tok);
void HandlePragmaPopMacro(Token &Tok);
void HandlePragmaIncludeAlias(Token &Tok);
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/Sema.h b/contrib/llvm/tools/clang/include/clang/Sema/Sema.h
index eca383bee2f5..e5961079f7c2 100644
--- a/contrib/llvm/tools/clang/include/clang/Sema/Sema.h
+++ b/contrib/llvm/tools/clang/include/clang/Sema/Sema.h
@@ -3766,6 +3766,9 @@ public:
void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType,
SourceLocation Loc);
+ /// Warn when implicitly casting 0 to nullptr.
+ void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E);
+
ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
return DelayedDiagnostics.push(pool);
}
diff --git a/contrib/llvm/tools/clang/include/clang/Tooling/FixIt.h b/contrib/llvm/tools/clang/include/clang/Tooling/FixIt.h
index e2259d4357bc..c1e508849280 100644
--- a/contrib/llvm/tools/clang/include/clang/Tooling/FixIt.h
+++ b/contrib/llvm/tools/clang/include/clang/Tooling/FixIt.h
@@ -65,6 +65,13 @@ FixItHint createReplacement(const D &Destination, const S &Source,
getText(Source, Context));
}
+// \brief Returns a FixItHint to replace \p Destination by \p Source.
+template <typename D>
+FixItHint createReplacement(const D &Destination, StringRef Source) {
+ return FixItHint::CreateReplacement(internal::getSourceRange(Destination),
+ Source);
+}
+
} // end namespace fixit
} // end namespace tooling
} // end namespace clang
diff --git a/contrib/llvm/tools/clang/lib/ASTMatchers/Dynamic/Registry.cpp b/contrib/llvm/tools/clang/lib/ASTMatchers/Dynamic/Registry.cpp
index 6f935620888f..26743d86f5e7 100644
--- a/contrib/llvm/tools/clang/lib/ASTMatchers/Dynamic/Registry.cpp
+++ b/contrib/llvm/tools/clang/lib/ASTMatchers/Dynamic/Registry.cpp
@@ -153,6 +153,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(cxxRecordDecl);
REGISTER_MATCHER(cxxReinterpretCastExpr);
REGISTER_MATCHER(cxxStaticCastExpr);
+ REGISTER_MATCHER(cxxStdInitializerListExpr);
REGISTER_MATCHER(cxxTemporaryObjectExpr);
REGISTER_MATCHER(cxxThisExpr);
REGISTER_MATCHER(cxxThrowExpr);
diff --git a/contrib/llvm/tools/clang/lib/Basic/Diagnostic.cpp b/contrib/llvm/tools/clang/lib/Basic/Diagnostic.cpp
index 6bdef78c074f..c355445dc1e7 100644
--- a/contrib/llvm/tools/clang/lib/Basic/Diagnostic.cpp
+++ b/contrib/llvm/tools/clang/lib/Basic/Diagnostic.cpp
@@ -146,10 +146,9 @@ void DiagnosticsEngine::SetDelayedDiagnostic(unsigned DiagID, StringRef Arg1,
}
void DiagnosticsEngine::ReportDelayed() {
- Report(DelayedDiagID) << DelayedDiagArg1 << DelayedDiagArg2;
+ unsigned ID = DelayedDiagID;
DelayedDiagID = 0;
- DelayedDiagArg1.clear();
- DelayedDiagArg2.clear();
+ Report(ID) << DelayedDiagArg1 << DelayedDiagArg2;
}
void DiagnosticsEngine::DiagStateMap::appendFirst(
@@ -420,11 +419,10 @@ bool DiagnosticsEngine::EmitCurrentDiagnostic(bool Force) {
}
// Clear out the current diagnostic object.
- unsigned DiagID = CurDiagID;
Clear();
// If there was a delayed diagnostic, emit it now.
- if (!Force && DelayedDiagID && DelayedDiagID != DiagID)
+ if (!Force && DelayedDiagID)
ReportDelayed();
return Emitted;
diff --git a/contrib/llvm/tools/clang/lib/Basic/DiagnosticIDs.cpp b/contrib/llvm/tools/clang/lib/Basic/DiagnosticIDs.cpp
index 2852b40026c2..ce493c1e5cab 100644
--- a/contrib/llvm/tools/clang/lib/Basic/DiagnosticIDs.cpp
+++ b/contrib/llvm/tools/clang/lib/Basic/DiagnosticIDs.cpp
@@ -666,6 +666,10 @@ bool DiagnosticIDs::ProcessDiag(DiagnosticsEngine &Diag) const {
}
}
+ // Make sure we set FatalErrorOccurred to ensure that the notes from the
+ // diagnostic that caused `fatal_too_many_errors` won't be emitted.
+ if (Diag.CurDiagID == diag::fatal_too_many_errors)
+ Diag.FatalErrorOccurred = true;
// Finally, report it.
EmitDiag(Diag, DiagLevel);
return true;
diff --git a/contrib/llvm/tools/clang/lib/Basic/Targets.cpp b/contrib/llvm/tools/clang/lib/Basic/Targets.cpp
index 78b03b1c314a..33eb0b05ddcd 100644
--- a/contrib/llvm/tools/clang/lib/Basic/Targets.cpp
+++ b/contrib/llvm/tools/clang/lib/Basic/Targets.cpp
@@ -2591,6 +2591,7 @@ class X86TargetInfo : public TargetInfo {
bool HasRDSEED = false;
bool HasADX = false;
bool HasTBM = false;
+ bool HasLWP = false;
bool HasFMA = false;
bool HasF16C = false;
bool HasAVX512CD = false;
@@ -3363,6 +3364,7 @@ bool X86TargetInfo::initFeatureMap(
case CK_BDVER1:
// xop implies avx, sse4a and fma4.
setFeatureEnabledImpl(Features, "xop", true);
+ setFeatureEnabledImpl(Features, "lwp", true);
setFeatureEnabledImpl(Features, "lzcnt", true);
setFeatureEnabledImpl(Features, "aes", true);
setFeatureEnabledImpl(Features, "pclmul", true);
@@ -3634,6 +3636,8 @@ bool X86TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HasADX = true;
} else if (Feature == "+tbm") {
HasTBM = true;
+ } else if (Feature == "+lwp") {
+ HasLWP = true;
} else if (Feature == "+fma") {
HasFMA = true;
} else if (Feature == "+f16c") {
@@ -3949,6 +3953,9 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
if (HasTBM)
Builder.defineMacro("__TBM__");
+ if (HasLWP)
+ Builder.defineMacro("__LWP__");
+
if (HasMWAITX)
Builder.defineMacro("__MWAITX__");
@@ -4132,6 +4139,7 @@ bool X86TargetInfo::hasFeature(StringRef Feature) const {
.Case("sse4.2", SSELevel >= SSE42)
.Case("sse4a", XOPLevel >= SSE4A)
.Case("tbm", HasTBM)
+ .Case("lwp", HasLWP)
.Case("x86", true)
.Case("x86_32", getTriple().getArch() == llvm::Triple::x86)
.Case("x86_64", getTriple().getArch() == llvm::Triple::x86_64)
@@ -5443,6 +5451,7 @@ public:
.Case("softfloat", SoftFloat)
.Case("thumb", isThumb())
.Case("neon", (FPU & NeonFPU) && !SoftFloat)
+ .Case("vfp", FPU && !SoftFloat)
.Case("hwdiv", HWDiv & HWDivThumb)
.Case("hwdiv-arm", HWDiv & HWDivARM)
.Default(false);
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/BackendUtil.cpp b/contrib/llvm/tools/clang/lib/CodeGen/BackendUtil.cpp
index 03883805199f..0d96f2efa60a 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/BackendUtil.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/BackendUtil.cpp
@@ -185,6 +185,7 @@ static void addSanitizerCoveragePass(const PassManagerBuilder &Builder,
Opts.Use8bitCounters = CGOpts.SanitizeCoverage8bitCounters;
Opts.TracePC = CGOpts.SanitizeCoverageTracePC;
Opts.TracePCGuard = CGOpts.SanitizeCoverageTracePCGuard;
+ Opts.NoPrune = CGOpts.SanitizeCoverageNoPrune;
PM.add(createSanitizerCoverageModulePass(Opts));
}
@@ -974,10 +975,14 @@ static void runThinLTOBackend(ModuleSummaryIndex *CombinedIndex, Module *M,
// via a WriteIndexesThinBackend.
FunctionImporter::ImportMapTy ImportList;
for (auto &GlobalList : *CombinedIndex) {
+ // Ignore entries for undefined references.
+ if (GlobalList.second.SummaryList.empty())
+ continue;
+
auto GUID = GlobalList.first;
- assert(GlobalList.second.size() == 1 &&
+ assert(GlobalList.second.SummaryList.size() == 1 &&
"Expected individual combined index to have one summary per GUID");
- auto &Summary = GlobalList.second[0];
+ auto &Summary = GlobalList.second.SummaryList[0];
// Skip the summaries for the importing module. These are included to
// e.g. record required linkage changes.
if (Summary->modulePath() == M->getModuleIdentifier())
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.cpp
index 791a57e61f53..2b2a92dd6019 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.cpp
@@ -623,9 +623,13 @@ static void enterBlockScope(CodeGenFunction &CGF, BlockDecl *block) {
// For const-qualified captures, emit clang.arc.use to ensure the captured
// object doesn't get released while we are still depending on its validity
// within the block.
- if (VT.isConstQualified() && VT.getObjCLifetime() == Qualifiers::OCL_Strong)
+ if (VT.isConstQualified() &&
+ VT.getObjCLifetime() == Qualifiers::OCL_Strong &&
+ CGF.CGM.getCodeGenOpts().OptimizationLevel != 0) {
+ assert(CGF.CGM.getLangOpts().ObjCAutoRefCount &&
+ "expected ObjC ARC to be enabled");
destroyer = CodeGenFunction::emitARCIntrinsicUse;
- else if (dtorKind == QualType::DK_objc_strong_lifetime) {
+ } else if (dtorKind == QualType::DK_objc_strong_lifetime) {
destroyer = CodeGenFunction::destroyARCStrongImprecise;
} else {
destroyer = CGF.getDestroyer(dtorKind);
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGBuiltin.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGBuiltin.cpp
index 6ea0a325a429..2f05c0e910e5 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGBuiltin.cpp
@@ -2751,7 +2751,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
// Push a clang.arc.use cleanup for each object in RetainableOperands. The
// cleanup will cause the use to appear after the final log call, keeping
- // the object valid while it’s held in the log buffer. Note that if there’s
+ // the object valid while it's held in the log buffer. Note that if there's
// a release cleanup on the object, it will already be active; since
// cleanups are emitted in reverse order, the use will occur before the
// object is released.
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.cpp
index 6e6eb7d7f13c..7b42850df968 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.cpp
@@ -658,34 +658,42 @@ void CodeGenFunction::EmitOpenCLKernelMetadata(const FunctionDecl *FD,
GenOpenCLArgMetadata(FD, Fn, CGM, Context, Builder, getContext());
if (const VecTypeHintAttr *A = FD->getAttr<VecTypeHintAttr>()) {
- QualType hintQTy = A->getTypeHint();
- const ExtVectorType *hintEltQTy = hintQTy->getAs<ExtVectorType>();
- bool isSignedInteger =
- hintQTy->isSignedIntegerType() ||
- (hintEltQTy && hintEltQTy->getElementType()->isSignedIntegerType());
- llvm::Metadata *attrMDArgs[] = {
+ QualType HintQTy = A->getTypeHint();
+ const ExtVectorType *HintEltQTy = HintQTy->getAs<ExtVectorType>();
+ bool IsSignedInteger =
+ HintQTy->isSignedIntegerType() ||
+ (HintEltQTy && HintEltQTy->getElementType()->isSignedIntegerType());
+ llvm::Metadata *AttrMDArgs[] = {
llvm::ConstantAsMetadata::get(llvm::UndefValue::get(
CGM.getTypes().ConvertType(A->getTypeHint()))),
llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
llvm::IntegerType::get(Context, 32),
- llvm::APInt(32, (uint64_t)(isSignedInteger ? 1 : 0))))};
- Fn->setMetadata("vec_type_hint", llvm::MDNode::get(Context, attrMDArgs));
+ llvm::APInt(32, (uint64_t)(IsSignedInteger ? 1 : 0))))};
+ Fn->setMetadata("vec_type_hint", llvm::MDNode::get(Context, AttrMDArgs));
}
if (const WorkGroupSizeHintAttr *A = FD->getAttr<WorkGroupSizeHintAttr>()) {
- llvm::Metadata *attrMDArgs[] = {
+ llvm::Metadata *AttrMDArgs[] = {
llvm::ConstantAsMetadata::get(Builder.getInt32(A->getXDim())),
llvm::ConstantAsMetadata::get(Builder.getInt32(A->getYDim())),
llvm::ConstantAsMetadata::get(Builder.getInt32(A->getZDim()))};
- Fn->setMetadata("work_group_size_hint", llvm::MDNode::get(Context, attrMDArgs));
+ Fn->setMetadata("work_group_size_hint", llvm::MDNode::get(Context, AttrMDArgs));
}
if (const ReqdWorkGroupSizeAttr *A = FD->getAttr<ReqdWorkGroupSizeAttr>()) {
- llvm::Metadata *attrMDArgs[] = {
+ llvm::Metadata *AttrMDArgs[] = {
llvm::ConstantAsMetadata::get(Builder.getInt32(A->getXDim())),
llvm::ConstantAsMetadata::get(Builder.getInt32(A->getYDim())),
llvm::ConstantAsMetadata::get(Builder.getInt32(A->getZDim()))};
- Fn->setMetadata("reqd_work_group_size", llvm::MDNode::get(Context, attrMDArgs));
+ Fn->setMetadata("reqd_work_group_size", llvm::MDNode::get(Context, AttrMDArgs));
+ }
+
+ if (const OpenCLIntelReqdSubGroupSizeAttr *A =
+ FD->getAttr<OpenCLIntelReqdSubGroupSizeAttr>()) {
+ llvm::Metadata *AttrMDArgs[] = {
+ llvm::ConstantAsMetadata::get(Builder.getInt32(A->getSubGroupSize()))};
+ Fn->setMetadata("intel_reqd_sub_group_size",
+ llvm::MDNode::get(Context, AttrMDArgs));
}
}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.h b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.h
index b69640894f11..459841aee5a2 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.h
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.h
@@ -1413,16 +1413,8 @@ private:
/// True if we need emit the life-time markers.
const bool ShouldEmitLifetimeMarkers;
- /// Add a kernel metadata node to the named metadata node 'opencl.kernels'.
- /// In the kernel metadata node, reference the kernel function and metadata
- /// nodes for its optional attribute qualifiers (OpenCL 1.1 6.7.2):
- /// - A node for the vec_type_hint(<type>) qualifier contains string
- /// "vec_type_hint", an undefined value of the <type> data type,
- /// and a Boolean that is true if the <type> is integer and signed.
- /// - A node for the work_group_size_hint(X,Y,Z) qualifier contains string
- /// "work_group_size_hint", and three 32-bit integers X, Y and Z.
- /// - A node for the reqd_work_group_size(X,Y,Z) qualifier contains string
- /// "reqd_work_group_size", and three 32-bit integers X, Y and Z.
+  /// Add OpenCL kernel arg metadata and the kernel attribute metadata to
+  /// the function metadata.
void EmitOpenCLKernelMetadata(const FunctionDecl *FD,
llvm::Function *Fn);
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.cpp b/contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.cpp
index ecd81d84b1fa..4ebbef7dfb5b 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.cpp
@@ -4890,10 +4890,16 @@ ABIArgInfo AArch64ABIInfo::classifyArgumentType(QualType Ty) const {
// Empty records are always ignored on Darwin, but actually passed in C++ mode
// elsewhere for GNU compatibility.
- if (isEmptyRecord(getContext(), Ty, true)) {
+ uint64_t Size = getContext().getTypeSize(Ty);
+ bool IsEmpty = isEmptyRecord(getContext(), Ty, true);
+ if (IsEmpty || Size == 0) {
if (!getContext().getLangOpts().CPlusPlus || isDarwinPCS())
return ABIArgInfo::getIgnore();
+ // GNU C mode. The only argument that gets ignored is an empty one with size
+ // 0.
+ if (IsEmpty && Size == 0)
+ return ABIArgInfo::getIgnore();
return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
}
@@ -4906,7 +4912,6 @@ ABIArgInfo AArch64ABIInfo::classifyArgumentType(QualType Ty) const {
}
// Aggregates <= 16 bytes are passed directly in registers or on the stack.
- uint64_t Size = getContext().getTypeSize(Ty);
if (Size <= 128) {
// On RenderScript, coerce Aggregates <= 16 bytes to an integer array of
// same size and alignment.
@@ -4946,7 +4951,8 @@ ABIArgInfo AArch64ABIInfo::classifyReturnType(QualType RetTy) const {
: ABIArgInfo::getDirect());
}
- if (isEmptyRecord(getContext(), RetTy, true))
+ uint64_t Size = getContext().getTypeSize(RetTy);
+ if (isEmptyRecord(getContext(), RetTy, true) || Size == 0)
return ABIArgInfo::getIgnore();
const Type *Base = nullptr;
@@ -4956,7 +4962,6 @@ ABIArgInfo AArch64ABIInfo::classifyReturnType(QualType RetTy) const {
return ABIArgInfo::getDirect();
// Aggregates <= 16 bytes are returned directly in registers or on the stack.
- uint64_t Size = getContext().getTypeSize(RetTy);
if (Size <= 128) {
// On RenderScript, coerce Aggregates <= 16 bytes to an integer array of
// same size and alignment.
diff --git a/contrib/llvm/tools/clang/lib/Driver/Distro.cpp b/contrib/llvm/tools/clang/lib/Driver/Distro.cpp
index d305b179449f..2df297f3cfc2 100644
--- a/contrib/llvm/tools/clang/lib/Driver/Distro.cpp
+++ b/contrib/llvm/tools/clang/lib/Driver/Distro.cpp
@@ -47,6 +47,7 @@ static Distro::DistroType DetectDistro(vfs::FileSystem &VFS) {
.Case("xenial", Distro::UbuntuXenial)
.Case("yakkety", Distro::UbuntuYakkety)
.Case("zesty", Distro::UbuntuZesty)
+ .Case("artful", Distro::UbuntuArtful)
.Default(Distro::UnknownDistro);
if (Version != Distro::UnknownDistro)
return Version;
diff --git a/contrib/llvm/tools/clang/lib/Driver/Multilib.cpp b/contrib/llvm/tools/clang/lib/Driver/Multilib.cpp
index 43b62f7b3612..16a81603b31e 100644
--- a/contrib/llvm/tools/clang/lib/Driver/Multilib.cpp
+++ b/contrib/llvm/tools/clang/lib/Driver/Multilib.cpp
@@ -80,6 +80,10 @@ Multilib &Multilib::includeSuffix(StringRef S) {
return *this;
}
+LLVM_DUMP_METHOD void Multilib::dump() const {
+ print(llvm::errs());
+}
+
void Multilib::print(raw_ostream &OS) const {
assert(GCCSuffix.empty() || (StringRef(GCCSuffix).front() == '/'));
if (GCCSuffix.empty())
@@ -270,6 +274,10 @@ bool MultilibSet::select(const Multilib::flags_list &Flags, Multilib &M) const {
return false;
}
+LLVM_DUMP_METHOD void MultilibSet::dump() const {
+ print(llvm::errs());
+}
+
void MultilibSet::print(raw_ostream &OS) const {
for (const Multilib &M : *this)
OS << M << "\n";
diff --git a/contrib/llvm/tools/clang/lib/Driver/SanitizerArgs.cpp b/contrib/llvm/tools/clang/lib/Driver/SanitizerArgs.cpp
index 4dd4929c9148..c298302c477c 100644
--- a/contrib/llvm/tools/clang/lib/Driver/SanitizerArgs.cpp
+++ b/contrib/llvm/tools/clang/lib/Driver/SanitizerArgs.cpp
@@ -55,6 +55,7 @@ enum CoverageFeature {
Coverage8bitCounters = 1 << 8,
CoverageTracePC = 1 << 9,
CoverageTracePCGuard = 1 << 10,
+ CoverageNoPrune = 1 << 11,
};
/// Parse a -fsanitize= or -fno-sanitize= argument's values, diagnosing any
@@ -629,7 +630,8 @@ void SanitizerArgs::addArgs(const ToolChain &TC, const llvm::opt::ArgList &Args,
std::make_pair(CoverageTraceGep, "-fsanitize-coverage-trace-gep"),
std::make_pair(Coverage8bitCounters, "-fsanitize-coverage-8bit-counters"),
std::make_pair(CoverageTracePC, "-fsanitize-coverage-trace-pc"),
- std::make_pair(CoverageTracePCGuard, "-fsanitize-coverage-trace-pc-guard")};
+ std::make_pair(CoverageTracePCGuard, "-fsanitize-coverage-trace-pc-guard"),
+ std::make_pair(CoverageNoPrune, "-fsanitize-coverage-no-prune")};
for (auto F : CoverageFlags) {
if (CoverageFeatures & F.first)
CmdArgs.push_back(Args.MakeArgString(F.second));
@@ -786,6 +788,7 @@ int parseCoverageFeatures(const Driver &D, const llvm::opt::Arg *A) {
.Case("8bit-counters", Coverage8bitCounters)
.Case("trace-pc", CoverageTracePC)
.Case("trace-pc-guard", CoverageTracePCGuard)
+ .Case("no-prune", CoverageNoPrune)
.Default(0);
if (F == 0)
D.Diag(clang::diag::err_drv_unsupported_option_argument)
diff --git a/contrib/llvm/tools/clang/lib/Driver/ToolChains/CrossWindows.cpp b/contrib/llvm/tools/clang/lib/Driver/ToolChains/CrossWindows.cpp
index b030c636adab..d290c62a056a 100644
--- a/contrib/llvm/tools/clang/lib/Driver/ToolChains/CrossWindows.cpp
+++ b/contrib/llvm/tools/clang/lib/Driver/ToolChains/CrossWindows.cpp
@@ -204,7 +204,7 @@ CrossWindowsToolChain::CrossWindowsToolChain(const Driver &D,
const llvm::Triple &T,
const llvm::opt::ArgList &Args)
: Generic_GCC(D, T, Args) {
- if (GetCXXStdlibType(Args) == ToolChain::CST_Libstdcxx) {
+ if (D.CCCIsCXX() && GetCXXStdlibType(Args) == ToolChain::CST_Libstdcxx) {
const std::string &SysRoot = D.SysRoot;
// libstdc++ resides in /usr/lib, but depends on libgcc which is placed in
diff --git a/contrib/llvm/tools/clang/lib/Driver/ToolChains/Gnu.cpp b/contrib/llvm/tools/clang/lib/Driver/ToolChains/Gnu.cpp
index d29d826b5f44..f1015e62eec8 100644
--- a/contrib/llvm/tools/clang/lib/Driver/ToolChains/Gnu.cpp
+++ b/contrib/llvm/tools/clang/lib/Driver/ToolChains/Gnu.cpp
@@ -893,6 +893,8 @@ static bool isSoftFloatABI(const ArgList &Args) {
A->getValue() == StringRef("soft"));
}
+/// \p Flag must be a flag accepted by the driver with its leading '-' removed,
+/// otherwise '-print-multi-lib' will not emit them correctly.
static void addMultilibFlag(bool Enabled, const char *const Flag,
std::vector<std::string> &Flags) {
if (Enabled)
@@ -1437,17 +1439,17 @@ static void findAndroidArmMultilibs(const Driver &D,
// Find multilibs with subdirectories like armv7-a, thumb, armv7-a/thumb.
FilterNonExistent NonExistent(Path, "/crtbegin.o", D.getVFS());
Multilib ArmV7Multilib = makeMultilib("/armv7-a")
- .flag("+armv7")
- .flag("-thumb");
+ .flag("+march=armv7-a")
+ .flag("-mthumb");
Multilib ThumbMultilib = makeMultilib("/thumb")
- .flag("-armv7")
- .flag("+thumb");
+ .flag("-march=armv7-a")
+ .flag("+mthumb");
Multilib ArmV7ThumbMultilib = makeMultilib("/armv7-a/thumb")
- .flag("+armv7")
- .flag("+thumb");
+ .flag("+march=armv7-a")
+ .flag("+mthumb");
Multilib DefaultMultilib = makeMultilib("")
- .flag("-armv7")
- .flag("-thumb");
+ .flag("-march=armv7-a")
+ .flag("-mthumb");
MultilibSet AndroidArmMultilibs =
MultilibSet()
.Either(ThumbMultilib, ArmV7Multilib,
@@ -1465,8 +1467,8 @@ static void findAndroidArmMultilibs(const Driver &D,
bool IsArmV7Mode = (IsArmArch || IsThumbArch) &&
(llvm::ARM::parseArchVersion(Arch) == 7 ||
(IsArmArch && Arch == "" && IsV7SubArch));
- addMultilibFlag(IsArmV7Mode, "armv7", Flags);
- addMultilibFlag(IsThumbMode, "thumb", Flags);
+ addMultilibFlag(IsArmV7Mode, "march=armv7-a", Flags);
+ addMultilibFlag(IsThumbMode, "mthumb", Flags);
if (AndroidArmMultilibs.select(Flags, Result.SelectedMultilib))
Result.Multilibs = AndroidArmMultilibs;
diff --git a/contrib/llvm/tools/clang/lib/Format/FormatToken.h b/contrib/llvm/tools/clang/lib/Format/FormatToken.h
index 3b3600fede97..0c5a5284627c 100644
--- a/contrib/llvm/tools/clang/lib/Format/FormatToken.h
+++ b/contrib/llvm/tools/clang/lib/Format/FormatToken.h
@@ -53,6 +53,8 @@ namespace format {
TYPE(InlineASMColon) \
TYPE(JavaAnnotation) \
TYPE(JsComputedPropertyName) \
+ TYPE(JsExponentiation) \
+ TYPE(JsExponentiationEqual) \
TYPE(JsFatArrow) \
TYPE(JsNonNullAssertion) \
TYPE(JsTypeColon) \
diff --git a/contrib/llvm/tools/clang/lib/Format/FormatTokenLexer.cpp b/contrib/llvm/tools/clang/lib/Format/FormatTokenLexer.cpp
index 1acc0c306512..45c3ae1afe5f 100644
--- a/contrib/llvm/tools/clang/lib/Format/FormatTokenLexer.cpp
+++ b/contrib/llvm/tools/clang/lib/Format/FormatTokenLexer.cpp
@@ -74,6 +74,10 @@ void FormatTokenLexer::tryMergePreviousTokens() {
static const tok::TokenKind JSShiftEqual[] = {tok::greater, tok::greater,
tok::greaterequal};
static const tok::TokenKind JSRightArrow[] = {tok::equal, tok::greater};
+ static const tok::TokenKind JSExponentiation[] = {tok::star, tok::star};
+ static const tok::TokenKind JSExponentiationEqual[] = {tok::star,
+ tok::starequal};
+
// FIXME: Investigate what token type gives the correct operator priority.
if (tryMergeTokens(JSIdentity, TT_BinaryOperator))
return;
@@ -83,6 +87,12 @@ void FormatTokenLexer::tryMergePreviousTokens() {
return;
if (tryMergeTokens(JSRightArrow, TT_JsFatArrow))
return;
+ if (tryMergeTokens(JSExponentiation, TT_JsExponentiation))
+ return;
+ if (tryMergeTokens(JSExponentiationEqual, TT_JsExponentiationEqual)) {
+ Tokens.back()->Tok.setKind(tok::starequal);
+ return;
+ }
}
if (Style.Language == FormatStyle::LK_Java) {
diff --git a/contrib/llvm/tools/clang/lib/Frontend/CompilerInvocation.cpp b/contrib/llvm/tools/clang/lib/Frontend/CompilerInvocation.cpp
index d3ebf48315e2..96854b8fbc1a 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/CompilerInvocation.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/CompilerInvocation.cpp
@@ -766,6 +766,7 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Opts.SanitizeCoverageTracePC = Args.hasArg(OPT_fsanitize_coverage_trace_pc);
Opts.SanitizeCoverageTracePCGuard =
Args.hasArg(OPT_fsanitize_coverage_trace_pc_guard);
+ Opts.SanitizeCoverageNoPrune = Args.hasArg(OPT_fsanitize_coverage_no_prune);
Opts.SanitizeMemoryTrackOrigins =
getLastArgIntValue(Args, OPT_fsanitize_memory_track_origins_EQ, 0, Diags);
Opts.SanitizeMemoryUseAfterDtor =
diff --git a/contrib/llvm/tools/clang/lib/Frontend/FrontendAction.cpp b/contrib/llvm/tools/clang/lib/Frontend/FrontendAction.cpp
index d26b6937b851..1fbb2b054bad 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/FrontendAction.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/FrontendAction.cpp
@@ -136,6 +136,12 @@ void FrontendAction::setCurrentInput(const FrontendInputFile &CurrentInput,
CurrentASTUnit = std::move(AST);
}
+Module *FrontendAction::getCurrentModule() const {
+ CompilerInstance &CI = getCompilerInstance();
+ return CI.getPreprocessor().getHeaderSearchInfo().lookupModule(
+ CI.getLangOpts().CurrentModule, /*AllowSearch*/false);
+}
+
std::unique_ptr<ASTConsumer>
FrontendAction::CreateWrappedASTConsumer(CompilerInstance &CI,
StringRef InFile) {
@@ -188,16 +194,25 @@ FrontendAction::CreateWrappedASTConsumer(CompilerInstance &CI,
return llvm::make_unique<MultiplexConsumer>(std::move(Consumers));
}
-// For preprocessed files, if the first line is the linemarker and specifies
-// the original source file name, use that name as the input file name.
-static bool ReadOriginalFileName(CompilerInstance &CI, std::string &InputFile)
-{
- bool Invalid = false;
+/// For preprocessed files, if the first line is the linemarker and specifies
+/// the original source file name, use that name as the input file name.
+/// Returns the location of the first token after the line marker directive.
+///
+/// \param CI The compiler instance.
+/// \param InputFile Populated with the filename from the line marker.
+/// \param AddLineNote If \c true, add a line note corresponding to this line
+/// directive. Only use this if the directive will not actually be
+/// visited by the preprocessor.
+static SourceLocation ReadOriginalFileName(CompilerInstance &CI,
+ std::string &InputFile,
+ bool AddLineNote = false) {
auto &SourceMgr = CI.getSourceManager();
auto MainFileID = SourceMgr.getMainFileID();
+
+ bool Invalid = false;
const auto *MainFileBuf = SourceMgr.getBuffer(MainFileID, &Invalid);
if (Invalid)
- return false;
+ return SourceLocation();
std::unique_ptr<Lexer> RawLexer(
new Lexer(MainFileID, MainFileBuf, SourceMgr, CI.getLangOpts()));
@@ -209,19 +224,37 @@ static bool ReadOriginalFileName(CompilerInstance &CI, std::string &InputFile)
// we use FILENAME as the input file name.
Token T;
if (RawLexer->LexFromRawLexer(T) || T.getKind() != tok::hash)
- return false;
+ return SourceLocation();
if (RawLexer->LexFromRawLexer(T) || T.isAtStartOfLine() ||
T.getKind() != tok::numeric_constant)
- return false;
+ return SourceLocation();
+
+ unsigned LineNo;
+ SourceLocation LineNoLoc = T.getLocation();
+ if (AddLineNote) {
+ llvm::SmallString<16> Buffer;
+ if (Lexer::getSpelling(LineNoLoc, Buffer, SourceMgr, CI.getLangOpts())
+ .getAsInteger(10, LineNo))
+ return SourceLocation();
+ }
+
RawLexer->LexFromRawLexer(T);
if (T.isAtStartOfLine() || T.getKind() != tok::string_literal)
- return false;
+ return SourceLocation();
StringLiteralParser Literal(T, CI.getPreprocessor());
if (Literal.hadError)
- return false;
+ return SourceLocation();
+ RawLexer->LexFromRawLexer(T);
+ if (T.isNot(tok::eof) && !T.isAtStartOfLine())
+ return SourceLocation();
InputFile = Literal.GetString().str();
- return true;
+
+ if (AddLineNote)
+ CI.getSourceManager().AddLineNote(
+ LineNoLoc, LineNo, SourceMgr.getLineTableFilenameID(InputFile));
+
+ return T.getLocation();
}
static SmallVectorImpl<char> &
@@ -339,42 +372,44 @@ collectModuleHeaderIncludes(const LangOptions &LangOpts, FileManager &FileMgr,
return std::error_code();
}
-/// Parse a module map and compute the corresponding real input buffer that
-/// should be used to build the module described by that module map and the
-/// current module name.
-static std::unique_ptr<llvm::MemoryBuffer>
-getInputBufferForModuleMap(CompilerInstance &CI, StringRef Filename,
- bool IsSystem) {
- // Find the module map file.
- const FileEntry *ModuleMap =
- CI.getFileManager().getFile(Filename, /*openFile*/true);
- if (!ModuleMap) {
- CI.getDiagnostics().Report(diag::err_module_map_not_found)
- << Filename;
- return nullptr;
- }
+static bool
+loadModuleMapForModuleBuild(CompilerInstance &CI, StringRef Filename,
+ bool IsSystem, bool IsPreprocessed,
+ unsigned &Offset) {
+ auto &SrcMgr = CI.getSourceManager();
+ HeaderSearch &HS = CI.getPreprocessor().getHeaderSearchInfo();
- // Find the module map file from which it was generated, if different.
- const FileEntry *OriginalModuleMap = ModuleMap;
- StringRef OriginalModuleMapName = CI.getFrontendOpts().OriginalModuleMap;
- if (!OriginalModuleMapName.empty()) {
- OriginalModuleMap = CI.getFileManager().getFile(OriginalModuleMapName,
- /*openFile*/ true);
- if (!OriginalModuleMap) {
- CI.getDiagnostics().Report(diag::err_module_map_not_found)
- << OriginalModuleMapName;
- return nullptr;
- }
+ // Map the current input to a file.
+ FileID ModuleMapID = SrcMgr.getMainFileID();
+ const FileEntry *ModuleMap = SrcMgr.getFileEntryForID(ModuleMapID);
+
+ // If the module map is preprocessed, handle the initial line marker;
+ // line directives are not part of the module map syntax in general.
+ Offset = 0;
+ if (IsPreprocessed) {
+ std::string PresumedModuleMapFile;
+ SourceLocation EndOfLineMarker =
+ ReadOriginalFileName(CI, PresumedModuleMapFile, /*AddLineNote*/true);
+ if (EndOfLineMarker.isValid())
+ Offset = CI.getSourceManager().getDecomposedLoc(EndOfLineMarker).second;
+ // FIXME: Use PresumedModuleMapFile as the MODULE_MAP_FILE in the PCM.
}
-
- // Parse the module map file.
- HeaderSearch &HS = CI.getPreprocessor().getHeaderSearchInfo();
- if (HS.loadModuleMapFile(ModuleMap, IsSystem))
- return nullptr;
-
+
+ // Load the module map file.
+ if (HS.loadModuleMapFile(ModuleMap, IsSystem, ModuleMapID, &Offset))
+ return true;
+
+ if (SrcMgr.getBuffer(ModuleMapID)->getBufferSize() == Offset)
+ Offset = 0;
+
+ return false;
+}
+
+static Module *prepareToBuildModule(CompilerInstance &CI,
+ StringRef ModuleMapFilename) {
if (CI.getLangOpts().CurrentModule.empty()) {
CI.getDiagnostics().Report(diag::err_missing_module_name);
-
+
// FIXME: Eventually, we could consider asking whether there was just
// a single module described in the module map, and use that as a
// default. Then it would be fairly trivial to just "compile" a module
@@ -382,21 +417,14 @@ getInputBufferForModuleMap(CompilerInstance &CI, StringRef Filename,
return nullptr;
}
- // If we're being run from the command-line, the module build stack will not
- // have been filled in yet, so complete it now in order to allow us to detect
- // module cycles.
- SourceManager &SourceMgr = CI.getSourceManager();
- if (SourceMgr.getModuleBuildStack().empty())
- SourceMgr.pushModuleBuildStack(CI.getLangOpts().CurrentModule,
- FullSourceLoc(SourceLocation(), SourceMgr));
-
// Dig out the module definition.
+ HeaderSearch &HS = CI.getPreprocessor().getHeaderSearchInfo();
Module *M = HS.lookupModule(CI.getLangOpts().CurrentModule,
/*AllowSearch=*/false);
if (!M) {
CI.getDiagnostics().Report(diag::err_missing_module)
- << CI.getLangOpts().CurrentModule << Filename;
-
+ << CI.getLangOpts().CurrentModule << ModuleMapFilename;
+
return nullptr;
}
@@ -417,11 +445,45 @@ getInputBufferForModuleMap(CompilerInstance &CI, StringRef Filename,
return nullptr;
}
- if (OriginalModuleMap != ModuleMap) {
- M->IsInferred = true;
- HS.getModuleMap().setInferredModuleAllowedBy(M, OriginalModuleMap);
+ // Inform the preprocessor that includes from within the input buffer should
+ // be resolved relative to the build directory of the module map file.
+ CI.getPreprocessor().setMainFileDir(M->Directory);
+
+ // If the module was inferred from a different module map (via an expanded
+ // umbrella module definition), track that fact.
+ // FIXME: It would be preferable to fill this in as part of processing
+ // the module map, rather than adding it after the fact.
+ StringRef OriginalModuleMapName = CI.getFrontendOpts().OriginalModuleMap;
+ if (!OriginalModuleMapName.empty()) {
+ auto *OriginalModuleMap =
+ CI.getFileManager().getFile(OriginalModuleMapName,
+ /*openFile*/ true);
+ if (!OriginalModuleMap) {
+ CI.getDiagnostics().Report(diag::err_module_map_not_found)
+ << OriginalModuleMapName;
+ return nullptr;
+ }
+ if (OriginalModuleMap != CI.getSourceManager().getFileEntryForID(
+ CI.getSourceManager().getMainFileID())) {
+ M->IsInferred = true;
+ CI.getPreprocessor().getHeaderSearchInfo().getModuleMap()
+ .setInferredModuleAllowedBy(M, OriginalModuleMap);
+ }
}
+ // If we're being run from the command-line, the module build stack will not
+ // have been filled in yet, so complete it now in order to allow us to detect
+ // module cycles.
+ SourceManager &SourceMgr = CI.getSourceManager();
+ if (SourceMgr.getModuleBuildStack().empty())
+ SourceMgr.pushModuleBuildStack(CI.getLangOpts().CurrentModule,
+ FullSourceLoc(SourceLocation(), SourceMgr));
+ return M;
+}
+
+/// Compute the input buffer that should be used to build the specified module.
+static std::unique_ptr<llvm::MemoryBuffer>
+getInputBufferForModule(CompilerInstance &CI, Module *M) {
FileManager &FileMgr = CI.getFileManager();
// Collect the set of #includes we need to build the module.
@@ -441,10 +503,6 @@ getInputBufferForModuleMap(CompilerInstance &CI, StringRef Filename,
return nullptr;
}
- // Inform the preprocessor that includes from within the input buffer should
- // be resolved relative to the build directory of the module map file.
- CI.getPreprocessor().setMainFileDir(M->Directory);
-
return llvm::MemoryBuffer::getMemBufferCopy(
HeaderContents, Module::getModuleInputBufferName());
}
@@ -457,7 +515,6 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
setCompilerInstance(&CI);
StringRef InputFile = Input.getFile();
- FrontendInputFile FileToProcess = Input;
bool HasBegunSourceFile = false;
if (!BeginInvocation(CI))
goto failure;
@@ -597,36 +654,45 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
&CI.getPreprocessor());
HasBegunSourceFile = true;
+ // Initialize the main file entry.
+ if (!CI.InitializeSourceManager(Input))
+ goto failure;
+
// For module map files, we first parse the module map and synthesize a
// "<module-includes>" buffer before more conventional processing.
if (Input.getKind().getFormat() == InputKind::ModuleMap) {
CI.getLangOpts().setCompilingModule(LangOptions::CMK_ModuleMap);
- auto Buffer = getInputBufferForModuleMap(CI, InputFile, Input.isSystem());
- if (!Buffer)
+ unsigned OffsetToContents;
+ if (loadModuleMapForModuleBuild(CI, Input.getFile(), Input.isSystem(),
+ Input.isPreprocessed(), OffsetToContents))
goto failure;
- Module *CurrentModule =
- CI.getPreprocessor().getHeaderSearchInfo().lookupModule(
- CI.getLangOpts().CurrentModule,
- /*AllowSearch=*/false);
- assert(CurrentModule && "no module info for current module");
+ auto *CurrentModule = prepareToBuildModule(CI, Input.getFile());
+ if (!CurrentModule)
+ goto failure;
+
+ if (OffsetToContents)
+ // If the module contents are in the same file, skip to them.
+ CI.getPreprocessor().setSkipMainFilePreamble(OffsetToContents, true);
+ else {
+ // Otherwise, convert the module description to a suitable input buffer.
+ auto Buffer = getInputBufferForModule(CI, CurrentModule);
+ if (!Buffer)
+ goto failure;
- // The input that we end up processing is the generated buffer, not the
- // module map file itself.
- FileToProcess = FrontendInputFile(
- Buffer.release(), Input.getKind().withFormat(InputKind::Source),
- CurrentModule->IsSystem);
+ // Reinitialize the main file entry to refer to the new input.
+ if (!CI.InitializeSourceManager(FrontendInputFile(
+ Buffer.release(), Input.getKind().withFormat(InputKind::Source),
+ CurrentModule->IsSystem)))
+ goto failure;
+ }
}
// Initialize the action.
if (!BeginSourceFileAction(CI, InputFile))
goto failure;
- // Initialize the main file entry.
- if (!CI.InitializeSourceManager(FileToProcess))
- goto failure;
-
// Create the AST context and consumer unless this is a preprocessor only
// action.
if (!usesPreprocessorOnly()) {
@@ -636,13 +702,12 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
// For preprocessed files, check if the first line specifies the original
// source file name with a linemarker.
- std::string OrigFile;
+ std::string PresumedInputFile = InputFile;
if (Input.isPreprocessed())
- if (ReadOriginalFileName(CI, OrigFile))
- InputFile = OrigFile;
+ ReadOriginalFileName(CI, PresumedInputFile);
std::unique_ptr<ASTConsumer> Consumer =
- CreateWrappedASTConsumer(CI, InputFile);
+ CreateWrappedASTConsumer(CI, PresumedInputFile);
if (!Consumer)
goto failure;
diff --git a/contrib/llvm/tools/clang/lib/Frontend/FrontendActions.cpp b/contrib/llvm/tools/clang/lib/Frontend/FrontendActions.cpp
index dd7c12f60f0e..baaf93b167bc 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/FrontendActions.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/FrontendActions.cpp
@@ -542,6 +542,18 @@ void PrintPreprocessedAction::ExecuteAction() {
CI.createDefaultOutputFile(BinaryMode, getCurrentFile());
if (!OS) return;
+ // If we're preprocessing a module map, start by dumping the contents of the
+ // module itself before switching to the input buffer.
+ auto &Input = getCurrentInput();
+ if (Input.getKind().getFormat() == InputKind::ModuleMap) {
+ if (Input.isFile())
+ (*OS) << "# 1 \"" << Input.getFile() << "\"\n";
+ // FIXME: Include additional information here so that we don't need the
+ // original source files to exist on disk.
+ getCurrentModule()->print(*OS);
+ (*OS) << "#pragma clang module contents\n";
+ }
+
DoPrintPreprocessedInput(CI.getPreprocessor(), OS.get(),
CI.getPreprocessorOutputOpts());
}
diff --git a/contrib/llvm/tools/clang/lib/Frontend/PrintPreprocessedOutput.cpp b/contrib/llvm/tools/clang/lib/Frontend/PrintPreprocessedOutput.cpp
index ffedf3cac847..832eaf2926f0 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/PrintPreprocessedOutput.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/PrintPreprocessedOutput.cpp
@@ -174,6 +174,9 @@ public:
void MacroUndefined(const Token &MacroNameTok,
const MacroDefinition &MD,
const MacroDirective *Undef) override;
+
+ void BeginModule(const Module *M);
+ void EndModule(const Module *M);
};
} // end anonymous namespace
@@ -372,6 +375,20 @@ void PrintPPOutputPPCallbacks::InclusionDirective(SourceLocation HashLoc,
}
}
+/// Handle entering the scope of a module during a module compilation.
+void PrintPPOutputPPCallbacks::BeginModule(const Module *M) {
+ startNewLineIfNeeded();
+ OS << "#pragma clang module begin " << M->getFullModuleName();
+ setEmittedDirectiveOnThisLine();
+}
+
+/// Handle leaving the scope of a module during a module compilation.
+void PrintPPOutputPPCallbacks::EndModule(const Module *M) {
+ startNewLineIfNeeded();
+ OS << "#pragma clang module end /*" << M->getFullModuleName() << "*/";
+ setEmittedDirectiveOnThisLine();
+}
+
/// Ident - Handle #ident directives when read by the preprocessor.
///
void PrintPPOutputPPCallbacks::Ident(SourceLocation Loc, StringRef S) {
@@ -685,13 +702,27 @@ static void PrintPreprocessedTokens(Preprocessor &PP, Token &Tok,
// -traditional-cpp the lexer keeps /all/ whitespace, including comments.
SourceLocation StartLoc = Tok.getLocation();
Callbacks->MoveToLine(StartLoc.getLocWithOffset(Tok.getLength()));
- } else if (Tok.is(tok::annot_module_include) ||
- Tok.is(tok::annot_module_begin) ||
- Tok.is(tok::annot_module_end)) {
+ } else if (Tok.is(tok::annot_module_include)) {
// PrintPPOutputPPCallbacks::InclusionDirective handles producing
// appropriate output here. Ignore this token entirely.
PP.Lex(Tok);
continue;
+ } else if (Tok.is(tok::annot_module_begin)) {
+ // FIXME: We retrieve this token after the FileChanged callback, and
+ // retrieve the module_end token before the FileChanged callback, so
+ // we render this within the file and render the module end outside the
+ // file, but this is backwards from the token locations: the module_begin
+ // token is at the include location (outside the file) and the module_end
+ // token is at the EOF location (within the file).
+ Callbacks->BeginModule(
+ reinterpret_cast<Module *>(Tok.getAnnotationValue()));
+ PP.Lex(Tok);
+ continue;
+ } else if (Tok.is(tok::annot_module_end)) {
+ Callbacks->EndModule(
+ reinterpret_cast<Module *>(Tok.getAnnotationValue()));
+ PP.Lex(Tok);
+ continue;
} else if (IdentifierInfo *II = Tok.getIdentifierInfo()) {
OS << II->getName();
} else if (Tok.isLiteral() && !Tok.needsCleaning() &&
diff --git a/contrib/llvm/tools/clang/lib/Frontend/Rewrite/FrontendActions.cpp b/contrib/llvm/tools/clang/lib/Frontend/Rewrite/FrontendActions.cpp
index 2e76e2e3151e..8c5eb161b5ab 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/Rewrite/FrontendActions.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/Rewrite/FrontendActions.cpp
@@ -196,6 +196,18 @@ void RewriteIncludesAction::ExecuteAction() {
CI.createDefaultOutputFile(true, getCurrentFile());
if (!OS) return;
+ // If we're preprocessing a module map, start by dumping the contents of the
+ // module itself before switching to the input buffer.
+ auto &Input = getCurrentInput();
+ if (Input.getKind().getFormat() == InputKind::ModuleMap) {
+ if (Input.isFile())
+ (*OS) << "# 1 \"" << Input.getFile() << "\"\n";
+ // FIXME: Include additional information here so that we don't need the
+ // original source files to exist on disk.
+ getCurrentModule()->print(*OS);
+ (*OS) << "#pragma clang module contents\n";
+ }
+
RewriteIncludesInInput(CI.getPreprocessor(), OS.get(),
CI.getPreprocessorOutputOpts());
}
diff --git a/contrib/llvm/tools/clang/lib/Frontend/Rewrite/InclusionRewriter.cpp b/contrib/llvm/tools/clang/lib/Frontend/Rewrite/InclusionRewriter.cpp
index ee61f76d029d..d45cbc01df8c 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/Rewrite/InclusionRewriter.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/Rewrite/InclusionRewriter.cpp
@@ -46,6 +46,8 @@ class InclusionRewriter : public PPCallbacks {
std::map<unsigned, IncludedFile> FileIncludes;
/// Tracks where inclusions that import modules are found.
std::map<unsigned, const Module *> ModuleIncludes;
+ /// Tracks where inclusions that enter modules (in a module build) are found.
+ std::map<unsigned, const Module *> ModuleEntryIncludes;
/// Used transitively for building up the FileIncludes mapping over the
/// various \c PPCallbacks callbacks.
SourceLocation LastInclusionLocation;
@@ -57,6 +59,11 @@ public:
PredefinesBuffer = Buf;
}
void detectMainFileEOL();
+ void handleModuleBegin(Token &Tok) {
+ assert(Tok.getKind() == tok::annot_module_begin);
+ ModuleEntryIncludes.insert({Tok.getLocation().getRawEncoding(),
+ (Module *)Tok.getAnnotationValue()});
+ }
private:
void FileChanged(SourceLocation Loc, FileChangeReason Reason,
SrcMgr::CharacteristicKind FileType,
@@ -84,6 +91,7 @@ private:
bool &FileExists);
const IncludedFile *FindIncludeAtLocation(SourceLocation Loc) const;
const Module *FindModuleAtLocation(SourceLocation Loc) const;
+ const Module *FindEnteredModule(SourceLocation Loc) const;
StringRef NextIdentifierName(Lexer &RawLex, Token &RawToken);
};
@@ -211,6 +219,16 @@ InclusionRewriter::FindModuleAtLocation(SourceLocation Loc) const {
return nullptr;
}
+/// Simple lookup for a SourceLocation (specifically one denoting the hash in
+/// an inclusion directive) in the map of module entry information.
+const Module *
+InclusionRewriter::FindEnteredModule(SourceLocation Loc) const {
+ const auto I = ModuleEntryIncludes.find(Loc.getRawEncoding());
+ if (I != ModuleEntryIncludes.end())
+ return I->second;
+ return nullptr;
+}
+
/// Detect the likely line ending style of \p FromFile by examining the first
/// newline found within it.
static StringRef DetectEOL(const MemoryBuffer &FromFile) {
@@ -452,8 +470,18 @@ void InclusionRewriter::Process(FileID FileId,
if (const Module *Mod = FindModuleAtLocation(Loc))
WriteImplicitModuleImport(Mod);
else if (const IncludedFile *Inc = FindIncludeAtLocation(Loc)) {
+ const Module *Mod = FindEnteredModule(Loc);
+ if (Mod)
+ OS << "#pragma clang module begin " << Mod->getFullModuleName()
+ << "\n";
+
// Include and recursively process the file.
Process(Inc->Id, Inc->FileType);
+
+ if (Mod)
+ OS << "#pragma clang module end /*" << Mod->getFullModuleName()
+ << "*/\n";
+
// Add line marker to indicate we're returning from an included
// file.
LineInfoExtra = " 2";
@@ -590,6 +618,8 @@ void clang::RewriteIncludesInInput(Preprocessor &PP, raw_ostream *OS,
PP.SetMacroExpansionOnlyInDirectives();
do {
PP.Lex(Tok);
+ if (Tok.is(tok::annot_module_begin))
+ Rewrite->handleModuleBegin(Tok);
} while (Tok.isNot(tok::eof));
Rewrite->setPredefinesBuffer(SM.getBuffer(PP.getPredefinesFileID()));
Rewrite->Process(PP.getPredefinesFileID(), SrcMgr::C_User);
diff --git a/contrib/llvm/tools/clang/lib/Headers/arm_acle.h b/contrib/llvm/tools/clang/lib/Headers/arm_acle.h
index 8423e62a381b..ab2589798269 100644
--- a/contrib/llvm/tools/clang/lib/Headers/arm_acle.h
+++ b/contrib/llvm/tools/clang/lib/Headers/arm_acle.h
@@ -225,19 +225,49 @@ __rbitl(unsigned long __t) {
}
/*
+ * 9.3 16-bit multiplications
+ */
+#if __ARM_FEATURE_DSP
+static __inline__ int32_t __attribute__((__always_inline__,__nodebug__))
+__smulbb(int32_t __a, int32_t __b) {
+ return __builtin_arm_smulbb(__a, __b);
+}
+static __inline__ int32_t __attribute__((__always_inline__,__nodebug__))
+__smulbt(int32_t __a, int32_t __b) {
+ return __builtin_arm_smulbt(__a, __b);
+}
+static __inline__ int32_t __attribute__((__always_inline__,__nodebug__))
+__smultb(int32_t __a, int32_t __b) {
+ return __builtin_arm_smultb(__a, __b);
+}
+static __inline__ int32_t __attribute__((__always_inline__,__nodebug__))
+__smultt(int32_t __a, int32_t __b) {
+ return __builtin_arm_smultt(__a, __b);
+}
+static __inline__ int32_t __attribute__((__always_inline__,__nodebug__))
+__smulwb(int32_t __a, int32_t __b) {
+ return __builtin_arm_smulwb(__a, __b);
+}
+static __inline__ int32_t __attribute__((__always_inline__,__nodebug__))
+__smulwt(int32_t __a, int32_t __b) {
+ return __builtin_arm_smulwt(__a, __b);
+}
+#endif
+
+/*
* 9.4 Saturating intrinsics
*
* FIXME: Change guard to their corrosponding __ARM_FEATURE flag when Q flag
* intrinsics are implemented and the flag is enabled.
*/
/* 9.4.1 Width-specified saturation intrinsics */
-#if __ARM_32BIT_STATE
+#if __ARM_FEATURE_SAT
#define __ssat(x, y) __builtin_arm_ssat(x, y)
#define __usat(x, y) __builtin_arm_usat(x, y)
#endif
/* 9.4.2 Saturating addition and subtraction intrinsics */
-#if __ARM_32BIT_STATE
+#if __ARM_FEATURE_DSP
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__qadd(int32_t __t, int32_t __v) {
return __builtin_arm_qadd(__t, __v);
@@ -254,6 +284,290 @@ __qdbl(int32_t __t) {
}
#endif
+/* 9.4.3 Accumultating multiplications */
+#if __ARM_FEATURE_DSP
+static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
+__smlabb(int32_t __a, int32_t __b, int32_t __c) {
+ return __builtin_arm_smlabb(__a, __b, __c);
+}
+static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
+__smlabt(int32_t __a, int32_t __b, int32_t __c) {
+ return __builtin_arm_smlabt(__a, __b, __c);
+}
+static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
+__smlatb(int32_t __a, int32_t __b, int32_t __c) {
+ return __builtin_arm_smlatb(__a, __b, __c);
+}
+static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
+__smlatt(int32_t __a, int32_t __b, int32_t __c) {
+ return __builtin_arm_smlatt(__a, __b, __c);
+}
+static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
+__smlawb(int32_t __a, int32_t __b, int32_t __c) {
+ return __builtin_arm_smlawb(__a, __b, __c);
+}
+static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
+__smlawt(int32_t __a, int32_t __b, int32_t __c) {
+ return __builtin_arm_smlawt(__a, __b, __c);
+}
+#endif
+
+
+/* 9.5.4 Parallel 16-bit saturation */
+#if __ARM_FEATURE_SIMD32
+#define __ssat16(x, y) __builtin_arm_ssat16(x, y)
+#define __usat16(x, y) __builtin_arm_usat16(x, y)
+#endif
+
+/* 9.5.5 Packing and unpacking */
+#if __ARM_FEATURE_SIMD32
+typedef int32_t int8x4_t;
+typedef int32_t int16x2_t;
+typedef uint32_t uint8x4_t;
+typedef uint32_t uint16x2_t;
+
+static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
+__sxtab16(int16x2_t __a, int8x4_t __b) {
+ return __builtin_arm_sxtab16(__a, __b);
+}
+static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
+__sxtb16(int8x4_t __a) {
+ return __builtin_arm_sxtb16(__a);
+}
+static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
+__uxtab16(int16x2_t __a, int8x4_t __b) {
+ return __builtin_arm_uxtab16(__a, __b);
+}
+static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
+__uxtb16(int8x4_t __a) {
+ return __builtin_arm_uxtb16(__a);
+}
+#endif
+
+/* 9.5.6 Parallel selection */
+#if __ARM_FEATURE_SIMD32
+static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__))
+__sel(uint8x4_t __a, uint8x4_t __b) {
+ return __builtin_arm_sel(__a, __b);
+}
+#endif
+
+/* 9.5.7 Parallel 8-bit addition and subtraction */
+#if __ARM_FEATURE_SIMD32
+static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__))
+__qadd8(int8x4_t __a, int8x4_t __b) {
+ return __builtin_arm_qadd8(__a, __b);
+}
+static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__))
+__qsub8(int8x4_t __a, int8x4_t __b) {
+ return __builtin_arm_qsub8(__a, __b);
+}
+static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__))
+__sadd8(int8x4_t __a, int8x4_t __b) {
+ return __builtin_arm_sadd8(__a, __b);
+}
+static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__))
+__shadd8(int8x4_t __a, int8x4_t __b) {
+ return __builtin_arm_shadd8(__a, __b);
+}
+static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__))
+__shsub8(int8x4_t __a, int8x4_t __b) {
+ return __builtin_arm_shsub8(__a, __b);
+}
+static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__))
+__ssub8(int8x4_t __a, int8x4_t __b) {
+ return __builtin_arm_ssub8(__a, __b);
+}
+static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__))
+__uadd8(uint8x4_t __a, uint8x4_t __b) {
+ return __builtin_arm_uadd8(__a, __b);
+}
+static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__))
+__uhadd8(uint8x4_t __a, uint8x4_t __b) {
+ return __builtin_arm_uhadd8(__a, __b);
+}
+static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__))
+__uhsub8(uint8x4_t __a, uint8x4_t __b) {
+ return __builtin_arm_uhsub8(__a, __b);
+}
+static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__))
+__uqadd8(uint8x4_t __a, uint8x4_t __b) {
+ return __builtin_arm_uqadd8(__a, __b);
+}
+static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__))
+__uqsub8(uint8x4_t __a, uint8x4_t __b) {
+ return __builtin_arm_uqsub8(__a, __b);
+}
+static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__))
+__usub8(uint8x4_t __a, uint8x4_t __b) {
+ return __builtin_arm_usub8(__a, __b);
+}
+#endif
+
+/* 9.5.8 Sum of 8-bit absolute differences */
+#if __ARM_FEATURE_SIMD32
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+__usad8(uint8x4_t __a, uint8x4_t __b) {
+ return __builtin_arm_usad8(__a, __b);
+}
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+__usada8(uint8x4_t __a, uint8x4_t __b, uint32_t __c) {
+ return __builtin_arm_usada8(__a, __b, __c);
+}
+#endif
+
+/* 9.5.9 Parallel 16-bit addition and subtraction */
+#if __ARM_FEATURE_SIMD32
+static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
+__qadd16(int16x2_t __a, int16x2_t __b) {
+ return __builtin_arm_qadd16(__a, __b);
+}
+static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
+__qasx(int16x2_t __a, int16x2_t __b) {
+ return __builtin_arm_qasx(__a, __b);
+}
+static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
+__qsax(int16x2_t __a, int16x2_t __b) {
+ return __builtin_arm_qsax(__a, __b);
+}
+static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
+__qsub16(int16x2_t __a, int16x2_t __b) {
+ return __builtin_arm_qsub16(__a, __b);
+}
+static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
+__sadd16(int16x2_t __a, int16x2_t __b) {
+ return __builtin_arm_sadd16(__a, __b);
+}
+static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
+__sasx(int16x2_t __a, int16x2_t __b) {
+ return __builtin_arm_sasx(__a, __b);
+}
+static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
+__shadd16(int16x2_t __a, int16x2_t __b) {
+ return __builtin_arm_shadd16(__a, __b);
+}
+static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
+__shasx(int16x2_t __a, int16x2_t __b) {
+ return __builtin_arm_shasx(__a, __b);
+}
+static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
+__shsax(int16x2_t __a, int16x2_t __b) {
+ return __builtin_arm_shsax(__a, __b);
+}
+static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
+__shsub16(int16x2_t __a, int16x2_t __b) {
+ return __builtin_arm_shsub16(__a, __b);
+}
+static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
+__ssax(int16x2_t __a, int16x2_t __b) {
+ return __builtin_arm_ssax(__a, __b);
+}
+static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
+__ssub16(int16x2_t __a, int16x2_t __b) {
+ return __builtin_arm_ssub16(__a, __b);
+}
+static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
+__uadd16(uint16x2_t __a, uint16x2_t __b) {
+ return __builtin_arm_uadd16(__a, __b);
+}
+static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
+__uasx(uint16x2_t __a, uint16x2_t __b) {
+ return __builtin_arm_uasx(__a, __b);
+}
+static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
+__uhadd16(uint16x2_t __a, uint16x2_t __b) {
+ return __builtin_arm_uhadd16(__a, __b);
+}
+static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
+__uhasx(uint16x2_t __a, uint16x2_t __b) {
+ return __builtin_arm_uhasx(__a, __b);
+}
+static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
+__uhsax(uint16x2_t __a, uint16x2_t __b) {
+ return __builtin_arm_uhsax(__a, __b);
+}
+static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
+__uhsub16(uint16x2_t __a, uint16x2_t __b) {
+ return __builtin_arm_uhsub16(__a, __b);
+}
+static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
+__uqadd16(uint16x2_t __a, uint16x2_t __b) {
+ return __builtin_arm_uqadd16(__a, __b);
+}
+static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
+__uqasx(uint16x2_t __a, uint16x2_t __b) {
+ return __builtin_arm_uqasx(__a, __b);
+}
+static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
+__uqsax(uint16x2_t __a, uint16x2_t __b) {
+ return __builtin_arm_uqsax(__a, __b);
+}
+static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
+__uqsub16(uint16x2_t __a, uint16x2_t __b) {
+ return __builtin_arm_uqsub16(__a, __b);
+}
+static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
+__usax(uint16x2_t __a, uint16x2_t __b) {
+ return __builtin_arm_usax(__a, __b);
+}
+static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
+__usub16(uint16x2_t __a, uint16x2_t __b) {
+ return __builtin_arm_usub16(__a, __b);
+}
+#endif
+
+/* 9.5.10 Parallel 16-bit multiplications */
+#if __ARM_FEATURE_SIMD32
+static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
+__smlad(int16x2_t __a, int16x2_t __b, int32_t __c) {
+ return __builtin_arm_smlad(__a, __b, __c);
+}
+static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
+__smladx(int16x2_t __a, int16x2_t __b, int32_t __c) {
+ return __builtin_arm_smladx(__a, __b, __c);
+}
+static __inline__ int64_t __attribute__((__always_inline__, __nodebug__))
+__smlald(int16x2_t __a, int16x2_t __b, int64_t __c) {
+ return __builtin_arm_smlald(__a, __b, __c);
+}
+static __inline__ int64_t __attribute__((__always_inline__, __nodebug__))
+__smlaldx(int16x2_t __a, int16x2_t __b, int64_t __c) {
+ return __builtin_arm_smlaldx(__a, __b, __c);
+}
+static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
+__smlsd(int16x2_t __a, int16x2_t __b, int32_t __c) {
+ return __builtin_arm_smlsd(__a, __b, __c);
+}
+static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
+__smlsdx(int16x2_t __a, int16x2_t __b, int32_t __c) {
+ return __builtin_arm_smlsdx(__a, __b, __c);
+}
+static __inline__ int64_t __attribute__((__always_inline__, __nodebug__))
+__smlsld(int16x2_t __a, int16x2_t __b, int64_t __c) {
+ return __builtin_arm_smlsld(__a, __b, __c);
+}
+static __inline__ int64_t __attribute__((__always_inline__, __nodebug__))
+__smlsldx(int16x2_t __a, int16x2_t __b, int64_t __c) {
+ return __builtin_arm_smlsldx(__a, __b, __c);
+}
+static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
+__smuad(int16x2_t __a, int16x2_t __b) {
+ return __builtin_arm_smuad(__a, __b);
+}
+static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
+__smuadx(int16x2_t __a, int16x2_t __b) {
+ return __builtin_arm_smuadx(__a, __b);
+}
+static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
+__smusd(int16x2_t __a, int16x2_t __b) {
+ return __builtin_arm_smusd(__a, __b);
+}
+static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
+__smusdx(int16x2_t __a, int16x2_t __b) {
+ return __builtin_arm_smusdx(__a, __b);
+}
+#endif
+
/* 9.7 CRC32 intrinsics */
#if __ARM_FEATURE_CRC32
static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
diff --git a/contrib/llvm/tools/clang/lib/Headers/lwpintrin.h b/contrib/llvm/tools/clang/lib/Headers/lwpintrin.h
new file mode 100644
index 000000000000..c95fdd9a201a
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/lwpintrin.h
@@ -0,0 +1,150 @@
+/*===---- lwpintrin.h - LWP intrinsics -------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __X86INTRIN_H
+#error "Never use <lwpintrin.h> directly; include <x86intrin.h> instead."
+#endif
+
+#ifndef __LWPINTRIN_H
+#define __LWPINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("lwp")))
+
+/// \brief Parses the LWPCB at the specified address and enables
+/// profiling if valid.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> LLWPCB </c> instruction.
+///
+/// \param __addr
+/// Address to the new Lightweight Profiling Control Block (LWPCB). If the
+/// LWPCB is valid, writes the address into the LWP_CBADDR MSR and enables
+/// Lightweight Profiling.
+static __inline__ void __DEFAULT_FN_ATTRS
+__llwpcb (void *__addr)
+{
+ __builtin_ia32_llwpcb(__addr);
+}
+
+/// \brief Flushes the LWP state to memory and returns the address of the LWPCB.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> SLWPCB </c> instruction.
+///
+/// \return
+/// Address to the current Lightweight Profiling Control Block (LWPCB).
+/// If LWP is not currently enabled, returns NULL.
+static __inline__ void* __DEFAULT_FN_ATTRS
+__slwpcb ()
+{
+ return __builtin_ia32_slwpcb();
+}
+
+/// \brief Inserts programmed event record into the LWP event ring buffer
+/// and advances the ring buffer pointer.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> LWPINS </c> instruction.
+///
+/// \param DATA2
+/// A 32-bit value is zero-extended and inserted into the 64-bit Data2 field.
+/// \param DATA1
+/// A 32-bit value is inserted into the 32-bit Data1 field.
+/// \param FLAGS
+/// A 32-bit immediate value is inserted into the 32-bit Flags field.
+/// \returns If the ring buffer is full and LWP is running in Synchronized Mode,
+/// the event record overwrites the last record in the buffer, the MissedEvents
+/// counter in the LWPCB is incremented, the head pointer is not advanced, and
+/// 1 is returned. Otherwise 0 is returned.
+#define __lwpins32(DATA2, DATA1, FLAGS) \
+ (__builtin_ia32_lwpins32((unsigned int) (DATA2), (unsigned int) (DATA1), \
+ (unsigned int) (FLAGS)))
+
+/// \brief Decrements the LWP programmed value sample event counter. If the result is
+/// negative, inserts an event record into the LWP event ring buffer in memory
+/// and advances the ring buffer pointer.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> LWPVAL </c> instruction.
+///
+/// \param DATA2
+/// A 32-bit value is zero-extended and inserted into the 64-bit Data2 field.
+/// \param DATA1
+/// A 32-bit value is inserted into the 32-bit Data1 field.
+/// \param FLAGS
+/// A 32-bit immediate value is inserted into the 32-bit Flags field.
+#define __lwpval32(DATA2, DATA1, FLAGS) \
+ (__builtin_ia32_lwpval32((unsigned int) (DATA2), (unsigned int) (DATA1), \
+ (unsigned int) (FLAGS)))
+
+#ifdef __x86_64__
+
+/// \brief Inserts programmed event record into the LWP event ring buffer
+/// and advances the ring buffer pointer.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> LWPINS </c> instruction.
+///
+/// \param DATA2
+/// A 64-bit value is inserted into the 64-bit Data2 field.
+/// \param DATA1
+/// A 32-bit value is inserted into the 32-bit Data1 field.
+/// \param FLAGS
+/// A 32-bit immediate value is inserted into the 32-bit Flags field.
+/// \returns If the ring buffer is full and LWP is running in Synchronized Mode,
+/// the event record overwrites the last record in the buffer, the MissedEvents
+/// counter in the LWPCB is incremented, the head pointer is not advanced, and
+/// 1 is returned. Otherwise 0 is returned.
+#define __lwpins64(DATA2, DATA1, FLAGS) \
+ (__builtin_ia32_lwpins64((unsigned long long) (DATA2), (unsigned int) (DATA1), \
+ (unsigned int) (FLAGS)))
+
+/// \brief Decrements the LWP programmed value sample event counter. If the result is
+/// negative, inserts an event record into the LWP event ring buffer in memory
+/// and advances the ring buffer pointer.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> LWPVAL </c> instruction.
+///
+/// \param DATA2
+///    A 64-bit value is inserted into the 64-bit Data2 field.
+/// \param DATA1
+/// A 32-bit value is inserted into the 32-bit Data1 field.
+/// \param FLAGS
+/// A 32-bit immediate value is inserted into the 32-bit Flags field.
+#define __lwpval64(DATA2, DATA1, FLAGS) \
+ (__builtin_ia32_lwpval64((unsigned long long) (DATA2), (unsigned int) (DATA1), \
+ (unsigned int) (FLAGS)))
+
+#endif
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __LWPINTRIN_H */
diff --git a/contrib/llvm/tools/clang/lib/Headers/x86intrin.h b/contrib/llvm/tools/clang/lib/Headers/x86intrin.h
index 2003029cb5a8..ef1d02948c8b 100644
--- a/contrib/llvm/tools/clang/lib/Headers/x86intrin.h
+++ b/contrib/llvm/tools/clang/lib/Headers/x86intrin.h
@@ -72,6 +72,10 @@
#include <tbmintrin.h>
#endif
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__LWP__)
+#include <lwpintrin.h>
+#endif
+
#if !defined(_MSC_VER) || __has_feature(modules) || defined(__F16C__)
#include <f16cintrin.h>
#endif
diff --git a/contrib/llvm/tools/clang/lib/Index/IndexDecl.cpp b/contrib/llvm/tools/clang/lib/Index/IndexDecl.cpp
index e8b2f1052d73..7de70a10b692 100644
--- a/contrib/llvm/tools/clang/lib/Index/IndexDecl.cpp
+++ b/contrib/llvm/tools/clang/lib/Index/IndexDecl.cpp
@@ -184,9 +184,7 @@ public:
continue;
}
Relations.emplace_back(
- SymbolRoleSet(SymbolRole::RelationOverrideOf) |
- SymbolRoleSet(SymbolRole::RelationSpecializationOf),
- ND);
+ SymbolRoleSet(SymbolRole::RelationSpecializationOf), ND);
}
}
}
diff --git a/contrib/llvm/tools/clang/lib/Lex/HeaderSearch.cpp b/contrib/llvm/tools/clang/lib/Lex/HeaderSearch.cpp
index bd425a07c33a..f5b7c59e446f 100644
--- a/contrib/llvm/tools/clang/lib/Lex/HeaderSearch.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/HeaderSearch.cpp
@@ -1325,7 +1325,8 @@ static const FileEntry *getPrivateModuleMap(const FileEntry *File,
return FileMgr.getFile(PrivateFilename);
}
-bool HeaderSearch::loadModuleMapFile(const FileEntry *File, bool IsSystem) {
+bool HeaderSearch::loadModuleMapFile(const FileEntry *File, bool IsSystem,
+ FileID ID, unsigned *Offset) {
// Find the directory for the module. For frameworks, that may require going
// up from the 'Modules' directory.
const DirectoryEntry *Dir = nullptr;
@@ -1344,7 +1345,7 @@ bool HeaderSearch::loadModuleMapFile(const FileEntry *File, bool IsSystem) {
}
}
- switch (loadModuleMapFileImpl(File, IsSystem, Dir)) {
+ switch (loadModuleMapFileImpl(File, IsSystem, Dir, ID, Offset)) {
case LMM_AlreadyLoaded:
case LMM_NewlyLoaded:
return false;
@@ -1357,7 +1358,8 @@ bool HeaderSearch::loadModuleMapFile(const FileEntry *File, bool IsSystem) {
HeaderSearch::LoadModuleMapResult
HeaderSearch::loadModuleMapFileImpl(const FileEntry *File, bool IsSystem,
- const DirectoryEntry *Dir) {
+ const DirectoryEntry *Dir, FileID ID,
+ unsigned *Offset) {
assert(File && "expected FileEntry");
// Check whether we've already loaded this module map, and mark it as being
@@ -1366,7 +1368,7 @@ HeaderSearch::loadModuleMapFileImpl(const FileEntry *File, bool IsSystem,
if (!AddResult.second)
return AddResult.first->second ? LMM_AlreadyLoaded : LMM_InvalidModuleMap;
- if (ModMap.parseModuleMapFile(File, IsSystem, Dir)) {
+ if (ModMap.parseModuleMapFile(File, IsSystem, Dir, ID, Offset)) {
LoadedModuleMaps[File] = false;
return LMM_InvalidModuleMap;
}
diff --git a/contrib/llvm/tools/clang/lib/Lex/Lexer.cpp b/contrib/llvm/tools/clang/lib/Lex/Lexer.cpp
index 003c9b5eed1b..3d6fe91115a9 100644
--- a/contrib/llvm/tools/clang/lib/Lex/Lexer.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/Lexer.cpp
@@ -452,6 +452,29 @@ bool Lexer::getRawToken(SourceLocation Loc, Token &Result,
return false;
}
+/// Returns the pointer that points to the beginning of line that contains
+/// the given offset, or null if the offset if invalid.
+static const char *findBeginningOfLine(StringRef Buffer, unsigned Offset) {
+ const char *BufStart = Buffer.data();
+ if (Offset >= Buffer.size())
+ return nullptr;
+ const char *StrData = BufStart + Offset;
+
+ if (StrData[0] == '\n' || StrData[0] == '\r')
+ return StrData;
+
+ const char *LexStart = StrData;
+ while (LexStart != BufStart) {
+ if (LexStart[0] == '\n' || LexStart[0] == '\r') {
+ ++LexStart;
+ break;
+ }
+
+ --LexStart;
+ }
+ return LexStart;
+}
+
static SourceLocation getBeginningOfFileToken(SourceLocation Loc,
const SourceManager &SM,
const LangOptions &LangOpts) {
@@ -467,27 +490,15 @@ static SourceLocation getBeginningOfFileToken(SourceLocation Loc,
// Back up from the current location until we hit the beginning of a line
// (or the buffer). We'll relex from that point.
- const char *BufStart = Buffer.data();
- if (LocInfo.second >= Buffer.size())
- return Loc;
-
- const char *StrData = BufStart+LocInfo.second;
- if (StrData[0] == '\n' || StrData[0] == '\r')
+ const char *StrData = Buffer.data() + LocInfo.second;
+ const char *LexStart = findBeginningOfLine(Buffer, LocInfo.second);
+ if (!LexStart || LexStart == StrData)
return Loc;
-
- const char *LexStart = StrData;
- while (LexStart != BufStart) {
- if (LexStart[0] == '\n' || LexStart[0] == '\r') {
- ++LexStart;
- break;
- }
-
- --LexStart;
- }
// Create a lexer starting at the beginning of this token.
SourceLocation LexerStartLoc = Loc.getLocWithOffset(-LocInfo.second);
- Lexer TheLexer(LexerStartLoc, LangOpts, BufStart, LexStart, Buffer.end());
+ Lexer TheLexer(LexerStartLoc, LangOpts, Buffer.data(), LexStart,
+ Buffer.end());
TheLexer.SetCommentRetentionState(true);
// Lex tokens until we find the token that contains the source location.
@@ -1038,6 +1049,27 @@ bool Lexer::isIdentifierBodyChar(char c, const LangOptions &LangOpts) {
return isIdentifierBody(c, LangOpts.DollarIdents);
}
+StringRef Lexer::getIndentationForLine(SourceLocation Loc,
+ const SourceManager &SM) {
+ if (Loc.isInvalid() || Loc.isMacroID())
+ return "";
+ std::pair<FileID, unsigned> LocInfo = SM.getDecomposedLoc(Loc);
+ if (LocInfo.first.isInvalid())
+ return "";
+ bool Invalid = false;
+ StringRef Buffer = SM.getBufferData(LocInfo.first, &Invalid);
+ if (Invalid)
+ return "";
+ const char *Line = findBeginningOfLine(Buffer, LocInfo.second);
+ if (!Line)
+ return "";
+ StringRef Rest = Buffer.substr(Line - Buffer.data());
+ size_t NumWhitespaceChars = Rest.find_first_not_of(" \t");
+ return NumWhitespaceChars == StringRef::npos
+ ? ""
+ : Rest.take_front(NumWhitespaceChars);
+}
+
//===----------------------------------------------------------------------===//
// Diagnostics forwarding code.
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm/tools/clang/lib/Lex/ModuleMap.cpp b/contrib/llvm/tools/clang/lib/Lex/ModuleMap.cpp
index 512d7dc5de68..70d37d3d7082 100644
--- a/contrib/llvm/tools/clang/lib/Lex/ModuleMap.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/ModuleMap.cpp
@@ -1132,14 +1132,17 @@ namespace clang {
}
bool parseModuleMapFile();
+
+ bool terminatedByDirective() { return false; }
+ SourceLocation getLocation() { return Tok.getLocation(); }
};
}
SourceLocation ModuleMapParser::consumeToken() {
-retry:
SourceLocation Result = Tok.getLocation();
+
+retry:
Tok.clear();
-
Token LToken;
L.LexFromRawLexer(LToken);
Tok.Location = LToken.getLocation().getRawEncoding();
@@ -1232,9 +1235,28 @@ retry:
case tok::comment:
goto retry;
-
+
+ case tok::hash:
+ // A module map can be terminated prematurely by
+ // #pragma clang module contents
+ // When building the module, we'll treat the rest of the file as the
+ // contents of the module.
+ {
+ auto NextIsIdent = [&](StringRef Str) -> bool {
+ L.LexFromRawLexer(LToken);
+ return !LToken.isAtStartOfLine() && LToken.is(tok::raw_identifier) &&
+ LToken.getRawIdentifier() == Str;
+ };
+ if (NextIsIdent("pragma") && NextIsIdent("clang") &&
+ NextIsIdent("module") && NextIsIdent("contents")) {
+ Tok.Kind = MMToken::EndOfFile;
+ break;
+ }
+ }
+ LLVM_FALLTHROUGH;
+
default:
- Diags.Report(LToken.getLocation(), diag::err_mmap_unknown_token);
+ Diags.Report(Tok.getLocation(), diag::err_mmap_unknown_token);
HadError = true;
goto retry;
}
@@ -1682,7 +1704,8 @@ void ModuleMapParser::parseExternModuleDecl() {
File, /*IsSystem=*/false,
Map.HeaderInfo.getHeaderSearchOpts().ModuleMapFileHomeIsCwd
? Directory
- : File->getDir(), ExternLoc);
+ : File->getDir(),
+ FileID(), nullptr, ExternLoc);
}
/// Whether to add the requirement \p Feature to the module \p M.
@@ -2522,28 +2545,45 @@ bool ModuleMapParser::parseModuleMapFile() {
}
bool ModuleMap::parseModuleMapFile(const FileEntry *File, bool IsSystem,
- const DirectoryEntry *Dir,
+ const DirectoryEntry *Dir, FileID ID,
+ unsigned *Offset,
SourceLocation ExternModuleLoc) {
+ assert(Target && "Missing target information");
llvm::DenseMap<const FileEntry *, bool>::iterator Known
= ParsedModuleMap.find(File);
if (Known != ParsedModuleMap.end())
return Known->second;
+ // If the module map file wasn't already entered, do so now.
+ if (ID.isInvalid()) {
+ auto FileCharacter = IsSystem ? SrcMgr::C_System : SrcMgr::C_User;
+ ID = SourceMgr.createFileID(File, ExternModuleLoc, FileCharacter);
+ }
+
assert(Target && "Missing target information");
- auto FileCharacter = IsSystem ? SrcMgr::C_System : SrcMgr::C_User;
- FileID ID = SourceMgr.createFileID(File, ExternModuleLoc, FileCharacter);
const llvm::MemoryBuffer *Buffer = SourceMgr.getBuffer(ID);
if (!Buffer)
return ParsedModuleMap[File] = true;
+ assert((!Offset || *Offset <= Buffer->getBufferSize()) &&
+ "invalid buffer offset");
// Parse this module map file.
- Lexer L(ID, SourceMgr.getBuffer(ID), SourceMgr, MMapLangOpts);
+ Lexer L(SourceMgr.getLocForStartOfFile(ID), MMapLangOpts,
+ Buffer->getBufferStart(),
+ Buffer->getBufferStart() + (Offset ? *Offset : 0),
+ Buffer->getBufferEnd());
SourceLocation Start = L.getSourceLocation();
ModuleMapParser Parser(L, SourceMgr, Target, Diags, *this, File, Dir,
BuiltinIncludeDir, IsSystem);
bool Result = Parser.parseModuleMapFile();
ParsedModuleMap[File] = Result;
+ if (Offset) {
+ auto Loc = SourceMgr.getDecomposedLoc(Parser.getLocation());
+ assert(Loc.first == ID && "stopped in a different file?");
+ *Offset = Loc.second;
+ }
+
// Notify callbacks that we parsed it.
for (const auto &Cb : Callbacks)
Cb->moduleMapFileRead(Start, *File, IsSystem);
diff --git a/contrib/llvm/tools/clang/lib/Lex/PPDirectives.cpp b/contrib/llvm/tools/clang/lib/Lex/PPDirectives.cpp
index 4826e399afda..06fee8e5b0a8 100644
--- a/contrib/llvm/tools/clang/lib/Lex/PPDirectives.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/PPDirectives.cpp
@@ -2049,12 +2049,12 @@ void Preprocessor::HandleIncludeDirective(SourceLocation HashLoc,
M->getTopLevelModuleName() == getLangOpts().CurrentModule)
return;
- assert(!CurSubmodule && "should not have marked this as a module yet");
- CurSubmodule = M;
+ assert(!CurLexerSubmodule && "should not have marked this as a module yet");
+ CurLexerSubmodule = M;
// Let the macro handling code know that any future macros are within
// the new submodule.
- EnterSubmodule(M, HashLoc);
+ EnterSubmodule(M, HashLoc, /*ForPragma*/false);
// Let the parser know that any future declarations are within the new
// submodule.
@@ -2082,7 +2082,7 @@ void Preprocessor::HandleIncludeNextDirective(SourceLocation HashLoc,
} else if (isInPrimaryFile()) {
Lookup = nullptr;
Diag(IncludeNextTok, diag::pp_include_next_in_primary);
- } else if (CurSubmodule) {
+ } else if (CurLexerSubmodule) {
// Start looking up in the directory *after* the one in which the current
// file would be found, if any.
assert(CurPPLexer && "#include_next directive in macro?");
diff --git a/contrib/llvm/tools/clang/lib/Lex/PPLexerChange.cpp b/contrib/llvm/tools/clang/lib/Lex/PPLexerChange.cpp
index fcc49b387034..1938328c904d 100644
--- a/contrib/llvm/tools/clang/lib/Lex/PPLexerChange.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/PPLexerChange.cpp
@@ -117,7 +117,7 @@ void Preprocessor::EnterSourceFileWithLexer(Lexer *TheLexer,
CurLexer.reset(TheLexer);
CurPPLexer = TheLexer;
CurDirLookup = CurDir;
- CurSubmodule = nullptr;
+ CurLexerSubmodule = nullptr;
if (CurLexerKind != CLK_LexAfterModuleImport)
CurLexerKind = CLK_Lexer;
@@ -142,7 +142,7 @@ void Preprocessor::EnterSourceFileWithPTH(PTHLexer *PL,
CurDirLookup = CurDir;
CurPTHLexer.reset(PL);
CurPPLexer = CurPTHLexer.get();
- CurSubmodule = nullptr;
+ CurLexerSubmodule = nullptr;
if (CurLexerKind != CLK_LexAfterModuleImport)
CurLexerKind = CLK_PTHLexer;
@@ -337,6 +337,26 @@ bool Preprocessor::HandleEndOfFile(Token &Result, bool isEndOfMacro) {
assert(!CurTokenLexer &&
"Ending a file when currently in a macro!");
+ // If we have an unclosed module region from a pragma at the end of a
+ // module, complain and close it now.
+ // FIXME: This is not correct if we are building a module from PTH.
+ const bool LeavingSubmodule = CurLexer && CurLexerSubmodule;
+ if ((LeavingSubmodule || IncludeMacroStack.empty()) &&
+ !BuildingSubmoduleStack.empty() &&
+ BuildingSubmoduleStack.back().IsPragma) {
+ Diag(BuildingSubmoduleStack.back().ImportLoc,
+ diag::err_pp_module_begin_without_module_end);
+ Module *M = LeaveSubmodule(/*ForPragma*/true);
+
+ Result.startToken();
+ const char *EndPos = getCurLexerEndPos();
+ CurLexer->BufferPtr = EndPos;
+ CurLexer->FormTokenWithChars(Result, EndPos, tok::annot_module_end);
+ Result.setAnnotationEndLoc(Result.getLocation());
+ Result.setAnnotationValue(M);
+ return true;
+ }
+
// See if this file had a controlling macro.
if (CurPPLexer) { // Not ending a macro, ignore it.
if (const IdentifierInfo *ControllingMacro =
@@ -442,18 +462,17 @@ bool Preprocessor::HandleEndOfFile(Token &Result, bool isEndOfMacro) {
if (Callbacks && !isEndOfMacro && CurPPLexer)
ExitedFID = CurPPLexer->getFileID();
- bool LeavingSubmodule = CurSubmodule && CurLexer;
if (LeavingSubmodule) {
+ // We're done with this submodule.
+ Module *M = LeaveSubmodule(/*ForPragma*/false);
+
// Notify the parser that we've left the module.
const char *EndPos = getCurLexerEndPos();
Result.startToken();
CurLexer->BufferPtr = EndPos;
CurLexer->FormTokenWithChars(Result, EndPos, tok::annot_module_end);
Result.setAnnotationEndLoc(Result.getLocation());
- Result.setAnnotationValue(CurSubmodule);
-
- // We're done with this submodule.
- LeaveSubmodule();
+ Result.setAnnotationValue(M);
}
// We're done with the #included file.
@@ -628,11 +647,13 @@ void Preprocessor::HandleMicrosoftCommentPaste(Token &Tok) {
assert(!FoundLexer && "Lexer should return EOD before EOF in PP mode");
}
-void Preprocessor::EnterSubmodule(Module *M, SourceLocation ImportLoc) {
+void Preprocessor::EnterSubmodule(Module *M, SourceLocation ImportLoc,
+ bool ForPragma) {
if (!getLangOpts().ModulesLocalVisibility) {
// Just track that we entered this submodule.
- BuildingSubmoduleStack.push_back(BuildingSubmoduleInfo(
- M, ImportLoc, CurSubmoduleState, PendingModuleMacroNames.size()));
+ BuildingSubmoduleStack.push_back(
+ BuildingSubmoduleInfo(M, ImportLoc, ForPragma, CurSubmoduleState,
+ PendingModuleMacroNames.size()));
return;
}
@@ -673,8 +694,9 @@ void Preprocessor::EnterSubmodule(Module *M, SourceLocation ImportLoc) {
}
// Track that we entered this module.
- BuildingSubmoduleStack.push_back(BuildingSubmoduleInfo(
- M, ImportLoc, CurSubmoduleState, PendingModuleMacroNames.size()));
+ BuildingSubmoduleStack.push_back(
+ BuildingSubmoduleInfo(M, ImportLoc, ForPragma, CurSubmoduleState,
+ PendingModuleMacroNames.size()));
// Switch to this submodule as the current submodule.
CurSubmoduleState = &State;
@@ -697,7 +719,13 @@ bool Preprocessor::needModuleMacros() const {
return getLangOpts().isCompilingModule();
}
-void Preprocessor::LeaveSubmodule() {
+Module *Preprocessor::LeaveSubmodule(bool ForPragma) {
+ if (BuildingSubmoduleStack.empty() ||
+ BuildingSubmoduleStack.back().IsPragma != ForPragma) {
+ assert(ForPragma && "non-pragma module enter/leave mismatch");
+ return nullptr;
+ }
+
auto &Info = BuildingSubmoduleStack.back();
Module *LeavingMod = Info.M;
@@ -711,7 +739,7 @@ void Preprocessor::LeaveSubmodule() {
// of pending names for the surrounding submodule.
BuildingSubmoduleStack.pop_back();
makeModuleVisible(LeavingMod, ImportLoc);
- return;
+ return LeavingMod;
}
// Create ModuleMacros for any macros defined in this submodule.
@@ -800,4 +828,5 @@ void Preprocessor::LeaveSubmodule() {
// A nested #include makes the included submodule visible.
makeModuleVisible(LeavingMod, ImportLoc);
+ return LeavingMod;
}
diff --git a/contrib/llvm/tools/clang/lib/Lex/PPMacroExpansion.cpp b/contrib/llvm/tools/clang/lib/Lex/PPMacroExpansion.cpp
index 196223981d74..6c7663994a49 100644
--- a/contrib/llvm/tools/clang/lib/Lex/PPMacroExpansion.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/PPMacroExpansion.cpp
@@ -1453,7 +1453,7 @@ static bool EvaluateHasIncludeNext(Token &Tok,
} else if (PP.isInPrimaryFile()) {
Lookup = nullptr;
PP.Diag(Tok, diag::pp_include_next_in_primary);
- } else if (PP.getCurrentSubmodule()) {
+ } else if (PP.getCurrentLexerSubmodule()) {
// Start looking up in the directory *after* the one in which the current
// file would be found, if any.
assert(PP.getCurrentLexer() && "#include_next directive in macro?");
diff --git a/contrib/llvm/tools/clang/lib/Lex/Pragma.cpp b/contrib/llvm/tools/clang/lib/Lex/Pragma.cpp
index 576151a98b2c..99d56182c1bb 100644
--- a/contrib/llvm/tools/clang/lib/Lex/Pragma.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/Pragma.cpp
@@ -534,47 +534,6 @@ void Preprocessor::HandlePragmaDependency(Token &DependencyTok) {
}
}
-void Preprocessor::HandlePragmaModuleImport(Token &ImportTok) {
- SourceLocation ImportLoc = ImportTok.getLocation();
-
- Token Tok;
-
- llvm::SmallVector<std::pair<IdentifierInfo *, SourceLocation>, 8> ModuleName;
- while (true) {
- LexUnexpandedToken(Tok);
- if (Tok.isNot(tok::identifier)) {
- Diag(Tok.getLocation(),
- diag::err_pragma_module_import_expected_module_name) << 0;
- return;
- }
-
- ModuleName.emplace_back(Tok.getIdentifierInfo(), Tok.getLocation());
-
- LexUnexpandedToken(Tok);
- assert(Tok.isNot(tok::eof));
- if (Tok.is(tok::eod))
- break;
- if (Tok.isNot(tok::period)) {
- Diag(Tok.getLocation(),
- diag::err_pragma_module_import_expected_module_name) << 1;
- return;
- }
- }
-
- // If we have a non-empty module path, load the named module.
- Module *Imported =
- TheModuleLoader.loadModule(ImportLoc, ModuleName, Module::Hidden,
- /*IsIncludeDirective=*/false);
- if (!Imported)
- return;
-
- makeModuleVisible(Imported, ImportLoc);
- EnterAnnotationToken(SourceRange(ImportLoc, Tok.getLocation()),
- tok::annot_module_include, Imported);
- if (Callbacks)
- Callbacks->moduleImport(ImportLoc, ModuleName, Imported);
-}
-
/// ParsePragmaPushOrPopMacro - Handle parsing of pragma push_macro/pop_macro.
/// Return the IdentifierInfo* associated with the macro to push or pop.
IdentifierInfo *Preprocessor::ParsePragmaPushOrPopMacro(Token &Tok) {
@@ -1342,6 +1301,26 @@ public:
}
};
+static bool LexModuleName(
+ Preprocessor &PP, Token &Tok,
+ llvm::SmallVectorImpl<std::pair<IdentifierInfo *, SourceLocation>>
+ &ModuleName) {
+ while (true) {
+ PP.LexUnexpandedToken(Tok);
+ if (Tok.isAnnotation() || !Tok.getIdentifierInfo()) {
+ PP.Diag(Tok.getLocation(), diag::err_pp_expected_module_name)
+ << ModuleName.empty();
+ return true;
+ }
+
+ ModuleName.emplace_back(Tok.getIdentifierInfo(), Tok.getLocation());
+
+ PP.LexUnexpandedToken(Tok);
+ if (Tok.isNot(tok::period))
+ return false;
+ }
+}
+
/// Handle the clang \#pragma module import extension. The syntax is:
/// \code
/// #pragma clang module import some.module.name
@@ -1350,8 +1329,108 @@ struct PragmaModuleImportHandler : public PragmaHandler {
PragmaModuleImportHandler() : PragmaHandler("import") {}
void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
- Token &ImportTok) override {
- PP.HandlePragmaModuleImport(ImportTok);
+ Token &Tok) override {
+ SourceLocation ImportLoc = Tok.getLocation();
+
+ // Read the module name.
+ llvm::SmallVector<std::pair<IdentifierInfo *, SourceLocation>, 8>
+ ModuleName;
+ if (LexModuleName(PP, Tok, ModuleName))
+ return;
+
+ if (Tok.isNot(tok::eod))
+ PP.Diag(Tok, diag::ext_pp_extra_tokens_at_eol) << "pragma";
+
+ // If we have a non-empty module path, load the named module.
+ Module *Imported =
+ PP.getModuleLoader().loadModule(ImportLoc, ModuleName, Module::Hidden,
+ /*IsIncludeDirective=*/false);
+ if (!Imported)
+ return;
+
+ PP.makeModuleVisible(Imported, ImportLoc);
+ PP.EnterAnnotationToken(SourceRange(ImportLoc, ModuleName.back().second),
+ tok::annot_module_include, Imported);
+ if (auto *CB = PP.getPPCallbacks())
+ CB->moduleImport(ImportLoc, ModuleName, Imported);
+ }
+};
+
+/// Handle the clang \#pragma module begin extension. The syntax is:
+/// \code
+/// #pragma clang module begin some.module.name
+/// ...
+/// #pragma clang module end
+/// \endcode
+struct PragmaModuleBeginHandler : public PragmaHandler {
+ PragmaModuleBeginHandler() : PragmaHandler("begin") {}
+
+ void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &Tok) override {
+ SourceLocation BeginLoc = Tok.getLocation();
+
+ // Read the module name.
+ llvm::SmallVector<std::pair<IdentifierInfo *, SourceLocation>, 8>
+ ModuleName;
+ if (LexModuleName(PP, Tok, ModuleName))
+ return;
+
+ if (Tok.isNot(tok::eod))
+ PP.Diag(Tok, diag::ext_pp_extra_tokens_at_eol) << "pragma";
+
+ // We can only enter submodules of the current module.
+ StringRef Current = PP.getLangOpts().CurrentModule;
+ if (ModuleName.front().first->getName() != Current) {
+ PP.Diag(ModuleName.front().second, diag::err_pp_module_begin_wrong_module)
+ << ModuleName.front().first << (ModuleName.size() > 1)
+ << Current.empty() << Current;
+ return;
+ }
+
+ // Find the module we're entering. We require that a module map for it
+ // be loaded or implicitly loadable.
+ // FIXME: We could create the submodule here. We'd need to know whether
+ // it's supposed to be explicit, but not much else.
+ Module *M = PP.getHeaderSearchInfo().getModuleMap().findModule(Current);
+ if (!M) {
+ PP.Diag(ModuleName.front().second,
+ diag::err_pp_module_begin_no_module_map) << Current;
+ return;
+ }
+ for (unsigned I = 1; I != ModuleName.size(); ++I) {
+ auto *NewM = M->findSubmodule(ModuleName[I].first->getName());
+ if (!NewM) {
+ PP.Diag(ModuleName[I].second, diag::err_pp_module_begin_no_submodule)
+ << M->getFullModuleName() << ModuleName[I].first;
+ return;
+ }
+ M = NewM;
+ }
+
+ // Enter the scope of the submodule.
+ PP.EnterSubmodule(M, BeginLoc, /*ForPragma*/true);
+ PP.EnterAnnotationToken(SourceRange(BeginLoc, ModuleName.back().second),
+ tok::annot_module_begin, M);
+ }
+};
+
+/// Handle the clang \#pragma module end extension.
+struct PragmaModuleEndHandler : public PragmaHandler {
+ PragmaModuleEndHandler() : PragmaHandler("end") {}
+
+ void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &Tok) override {
+ SourceLocation Loc = Tok.getLocation();
+
+ PP.LexUnexpandedToken(Tok);
+ if (Tok.isNot(tok::eod))
+ PP.Diag(Tok, diag::ext_pp_extra_tokens_at_eol) << "pragma";
+
+ Module *M = PP.LeaveSubmodule(/*ForPragma*/true);
+ if (M)
+ PP.EnterAnnotationToken(SourceRange(Loc), tok::annot_module_end, M);
+ else
+ PP.Diag(Loc, diag::err_pp_module_end_without_module_begin);
}
};
@@ -1582,6 +1661,8 @@ void Preprocessor::RegisterBuiltinPragmas() {
auto *ModuleHandler = new PragmaNamespace("module");
AddPragmaHandler("clang", ModuleHandler);
ModuleHandler->AddPragma(new PragmaModuleImportHandler());
+ ModuleHandler->AddPragma(new PragmaModuleBeginHandler());
+ ModuleHandler->AddPragma(new PragmaModuleEndHandler());
AddPragmaHandler("STDC", new PragmaSTDC_FENV_ACCESSHandler());
AddPragmaHandler("STDC", new PragmaSTDC_CX_LIMITED_RANGEHandler());
diff --git a/contrib/llvm/tools/clang/lib/Lex/Preprocessor.cpp b/contrib/llvm/tools/clang/lib/Lex/Preprocessor.cpp
index babef5dcc7ca..e409ab036535 100644
--- a/contrib/llvm/tools/clang/lib/Lex/Preprocessor.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/Preprocessor.cpp
@@ -85,10 +85,10 @@ Preprocessor::Preprocessor(std::shared_ptr<PreprocessorOptions> PPOpts,
LastTokenWasAt(false), ModuleImportExpectsIdentifier(false),
CodeCompletionReached(false), CodeCompletionII(nullptr),
MainFileDir(nullptr), SkipMainFilePreamble(0, true), CurPPLexer(nullptr),
- CurDirLookup(nullptr), CurLexerKind(CLK_Lexer), CurSubmodule(nullptr),
- Callbacks(nullptr), CurSubmoduleState(&NullSubmoduleState),
- MacroArgCache(nullptr), Record(nullptr), MIChainHead(nullptr),
- DeserialMIChainHead(nullptr) {
+ CurDirLookup(nullptr), CurLexerKind(CLK_Lexer),
+ CurLexerSubmodule(nullptr), Callbacks(nullptr),
+ CurSubmoduleState(&NullSubmoduleState), MacroArgCache(nullptr),
+ Record(nullptr), MIChainHead(nullptr), DeserialMIChainHead(nullptr) {
OwnsHeaderSearch = OwnsHeaders;
CounterValue = 0; // __COUNTER__ starts at 0.
diff --git a/contrib/llvm/tools/clang/lib/Lex/TokenLexer.cpp b/contrib/llvm/tools/clang/lib/Lex/TokenLexer.cpp
index a53c8014ebaf..049e046cece1 100644
--- a/contrib/llvm/tools/clang/lib/Lex/TokenLexer.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/TokenLexer.cpp
@@ -183,6 +183,12 @@ void TokenLexer::ExpandFunctionArguments() {
// preprocessor already verified that the following token is a macro name
// when the #define was parsed.
const Token &CurTok = Tokens[i];
+ // We don't want a space for the next token after a paste
+ // operator. In valid code, the token will get smooshed onto the
+ // preceding one anyway. In assembler-with-cpp mode, invalid
+ // pastes are allowed through: in this case, we do not want the
+ // extra whitespace to be added. For example, we want ". ## foo"
+ // -> ".foo" not ". foo".
if (i != 0 && !Tokens[i-1].is(tok::hashhash) && CurTok.hasLeadingSpace())
NextTokGetsSpace = true;
@@ -317,6 +323,7 @@ void TokenLexer::ExpandFunctionArguments() {
const Token *ArgToks = ActualArgs->getUnexpArgument(ArgNo);
unsigned NumToks = MacroArgs::getArgLength(ArgToks);
if (NumToks) { // Not an empty argument?
+ bool VaArgsPseudoPaste = false;
// If this is the GNU ", ## __VA_ARGS__" extension, and we just learned
// that __VA_ARGS__ expands to multiple tokens, avoid a pasting error when
// the expander trys to paste ',' with the first token of the __VA_ARGS__
@@ -325,6 +332,7 @@ void TokenLexer::ExpandFunctionArguments() {
ResultToks[ResultToks.size()-2].is(tok::comma) &&
(unsigned)ArgNo == Macro->getNumArgs()-1 &&
Macro->isVariadic()) {
+ VaArgsPseudoPaste = true;
// Remove the paste operator, report use of the extension.
PP.Diag(ResultToks.pop_back_val().getLocation(), diag::ext_paste_comma);
}
@@ -344,18 +352,16 @@ void TokenLexer::ExpandFunctionArguments() {
ResultToks.end()-NumToks, ResultToks.end());
}
- // If this token (the macro argument) was supposed to get leading
- // whitespace, transfer this information onto the first token of the
- // expansion.
- //
- // Do not do this if the paste operator occurs before the macro argument,
- // as in "A ## MACROARG". In valid code, the first token will get
- // smooshed onto the preceding one anyway (forming AMACROARG). In
- // assembler-with-cpp mode, invalid pastes are allowed through: in this
- // case, we do not want the extra whitespace to be added. For example,
- // we want ". ## foo" -> ".foo" not ". foo".
- if (NextTokGetsSpace)
- ResultToks[ResultToks.size()-NumToks].setFlag(Token::LeadingSpace);
+ // Transfer the leading whitespace information from the token
+ // (the macro argument) onto the first token of the
+ // expansion. Note that we don't do this for the GNU
+ // pseudo-paste extension ", ## __VA_ARGS__".
+ if (!VaArgsPseudoPaste) {
+ ResultToks[ResultToks.size() - NumToks].setFlagValue(Token::StartOfLine,
+ false);
+ ResultToks[ResultToks.size() - NumToks].setFlagValue(
+ Token::LeadingSpace, NextTokGetsSpace);
+ }
NextTokGetsSpace = false;
continue;
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseDeclCXX.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseDeclCXX.cpp
index b25152a3183e..ad7b319676e9 100644
--- a/contrib/llvm/tools/clang/lib/Parse/ParseDeclCXX.cpp
+++ b/contrib/llvm/tools/clang/lib/Parse/ParseDeclCXX.cpp
@@ -4151,8 +4151,6 @@ void Parser::ParseMicrosoftUuidAttributeArgs(ParsedAttributes &Attrs) {
}
if (!T.consumeClose()) {
- // FIXME: Warn that this syntax is deprecated, with a Fix-It suggesting
- // using __declspec(uuid()) instead.
Attrs.addNew(UuidIdent, SourceRange(UuidLoc, T.getCloseLocation()), nullptr,
SourceLocation(), ArgExprs.data(), ArgExprs.size(),
AttributeList::AS_Microsoft);
diff --git a/contrib/llvm/tools/clang/lib/Sema/Sema.cpp b/contrib/llvm/tools/clang/lib/Sema/Sema.cpp
index 94e792c1d17d..2f493fa5fbef 100644
--- a/contrib/llvm/tools/clang/lib/Sema/Sema.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/Sema.cpp
@@ -383,6 +383,19 @@ void Sema::diagnoseNullableToNonnullConversion(QualType DstType,
Diag(Loc, diag::warn_nullability_lost) << SrcType << DstType;
}
+void Sema::diagnoseZeroToNullptrConversion(CastKind Kind, const Expr* E) {
+ if (Kind != CK_NullToPointer && Kind != CK_NullToMemberPointer)
+ return;
+ if (E->getType()->isNullPtrType())
+ return;
+ // nullptr only exists from C++11 on, so don't warn on its absence earlier.
+ if (!getLangOpts().CPlusPlus11)
+ return;
+
+ Diag(E->getLocStart(), diag::warn_zero_as_null_pointer_constant)
+ << FixItHint::CreateReplacement(E->getSourceRange(), "nullptr");
+}
+
/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit cast.
/// If there is already an implicit cast, merge into the existing one.
/// The result is of the given category.
@@ -407,6 +420,7 @@ ExprResult Sema::ImpCastExprToType(Expr *E, QualType Ty,
#endif
diagnoseNullableToNonnullConversion(Ty, E->getType(), E->getLocStart());
+ diagnoseZeroToNullptrConversion(Kind, E);
QualType ExprTy = Context.getCanonicalType(E->getType());
QualType TypeTy = Context.getCanonicalType(Ty);
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaChecking.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaChecking.cpp
index a206100b89eb..14dd6267b854 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaChecking.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaChecking.cpp
@@ -3652,22 +3652,29 @@ static bool checkVAStartIsInVariadicFunction(Sema &S, Expr *Fn,
// and get its parameter list.
bool IsVariadic = false;
ArrayRef<ParmVarDecl *> Params;
- if (BlockScopeInfo *CurBlock = S.getCurBlock()) {
- IsVariadic = CurBlock->TheDecl->isVariadic();
- Params = CurBlock->TheDecl->parameters();
- } else if (FunctionDecl *FD = S.getCurFunctionDecl()) {
+ DeclContext *Caller = S.CurContext;
+ if (auto *Block = dyn_cast<BlockDecl>(Caller)) {
+ IsVariadic = Block->isVariadic();
+ Params = Block->parameters();
+ } else if (auto *FD = dyn_cast<FunctionDecl>(Caller)) {
IsVariadic = FD->isVariadic();
Params = FD->parameters();
- } else if (ObjCMethodDecl *MD = S.getCurMethodDecl()) {
+ } else if (auto *MD = dyn_cast<ObjCMethodDecl>(Caller)) {
IsVariadic = MD->isVariadic();
// FIXME: This isn't correct for methods (results in bogus warning).
Params = MD->parameters();
+ } else if (isa<CapturedDecl>(Caller)) {
+ // We don't support va_start in a CapturedDecl.
+ S.Diag(Fn->getLocStart(), diag::err_va_start_captured_stmt);
+ return true;
} else {
- llvm_unreachable("unknown va_start context");
+ // This must be some other declcontext that parses exprs.
+ S.Diag(Fn->getLocStart(), diag::err_va_start_outside_function);
+ return true;
}
if (!IsVariadic) {
- S.Diag(Fn->getLocStart(), diag::err_va_start_used_in_non_variadic_function);
+ S.Diag(Fn->getLocStart(), diag::err_va_start_fixed_function);
return true;
}
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaDecl.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaDecl.cpp
index d4c0783638d1..2612023f59db 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaDecl.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaDecl.cpp
@@ -6070,12 +6070,24 @@ NamedDecl *Sema::ActOnVariableDeclarator(
}
}
- // OpenCL v1.2 s6.9.b p4:
- // The sampler type cannot be used with the __local and __global address
- // space qualifiers.
- if (R->isSamplerT() && (R.getAddressSpace() == LangAS::opencl_local ||
- R.getAddressSpace() == LangAS::opencl_global)) {
- Diag(D.getIdentifierLoc(), diag::err_wrong_sampler_addressspace);
+ if (R->isSamplerT()) {
+ // OpenCL v1.2 s6.9.b p4:
+ // The sampler type cannot be used with the __local and __global address
+ // space qualifiers.
+ if (R.getAddressSpace() == LangAS::opencl_local ||
+ R.getAddressSpace() == LangAS::opencl_global) {
+ Diag(D.getIdentifierLoc(), diag::err_wrong_sampler_addressspace);
+ }
+
+ // OpenCL v1.2 s6.12.14.1:
+ // A global sampler must be declared with either the constant address
+ // space qualifier or with the const qualifier.
+ if (DC->isTranslationUnit() &&
+ !(R.getAddressSpace() == LangAS::opencl_constant ||
+ R.isConstQualified())) {
+ Diag(D.getIdentifierLoc(), diag::err_opencl_nonconst_global_sampler);
+ D.setInvalidType();
+ }
}
// OpenCL v1.2 s6.9.r:
@@ -15902,7 +15914,7 @@ void Sema::ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod) {
VisibleModules.setVisible(Mod, DirectiveLoc);
}
-void Sema::ActOnModuleEnd(SourceLocation EofLoc, Module *Mod) {
+void Sema::ActOnModuleEnd(SourceLocation EomLoc, Module *Mod) {
if (getLangOpts().ModulesLocalVisibility) {
VisibleModules = std::move(ModuleScopes.back().OuterVisibleModules);
// Leaving a module hides namespace names, so our visible namespace cache
@@ -15914,12 +15926,19 @@ void Sema::ActOnModuleEnd(SourceLocation EofLoc, Module *Mod) {
"left the wrong module scope");
ModuleScopes.pop_back();
- // We got to the end of processing a #include of a local module. Create an
+ // We got to the end of processing a local module. Create an
// ImportDecl as we would for an imported module.
- FileID File = getSourceManager().getFileID(EofLoc);
- assert(File != getSourceManager().getMainFileID() &&
- "end of submodule in main source file");
- SourceLocation DirectiveLoc = getSourceManager().getIncludeLoc(File);
+ FileID File = getSourceManager().getFileID(EomLoc);
+ SourceLocation DirectiveLoc;
+ if (EomLoc == getSourceManager().getLocForEndOfFile(File)) {
+ // We reached the end of a #included module header. Use the #include loc.
+ assert(File != getSourceManager().getMainFileID() &&
+ "end of submodule in main source file");
+ DirectiveLoc = getSourceManager().getIncludeLoc(File);
+ } else {
+ // We reached an EOM pragma. Use the pragma location.
+ DirectiveLoc = EomLoc;
+ }
BuildModuleInclude(DirectiveLoc, Mod);
}
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaDeclAttr.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaDeclAttr.cpp
index bb5434a03a10..97d273f6ddb6 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaDeclAttr.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaDeclAttr.cpp
@@ -2891,6 +2891,28 @@ static void handleWorkGroupSize(Sema &S, Decl *D,
Attr.getAttributeSpellingListIndex()));
}
+// Handles intel_reqd_sub_group_size.
+static void handleSubGroupSize(Sema &S, Decl *D, const AttributeList &Attr) {
+ uint32_t SGSize;
+ const Expr *E = Attr.getArgAsExpr(0);
+ if (!checkUInt32Argument(S, Attr, E, SGSize))
+ return;
+ if (SGSize == 0) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_is_zero)
+ << Attr.getName() << E->getSourceRange();
+ return;
+ }
+
+ OpenCLIntelReqdSubGroupSizeAttr *Existing =
+ D->getAttr<OpenCLIntelReqdSubGroupSizeAttr>();
+ if (Existing && Existing->getSubGroupSize() != SGSize)
+ S.Diag(Attr.getLoc(), diag::warn_duplicate_attribute) << Attr.getName();
+
+ D->addAttr(::new (S.Context) OpenCLIntelReqdSubGroupSizeAttr(
+ Attr.getRange(), S.Context, SGSize,
+ Attr.getAttributeSpellingListIndex()));
+}
+
static void handleVecTypeHint(Sema &S, Decl *D, const AttributeList &Attr) {
if (!Attr.hasParsedType()) {
S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments)
@@ -5057,6 +5079,15 @@ static void handleUuidAttr(Sema &S, Decl *D, const AttributeList &Attr) {
}
}
+ // FIXME: It'd be nice to also emit a fixit removing uuid(...) (and, if it's
+ // the only thing in the [] list, the [] too), and add an insertion of
+ // __declspec(uuid(...)). But sadly, neither the SourceLocs of the commas
+ // separating attributes nor of the [ and the ] are in the AST.
+ // Cf "SourceLocations of attribute list delimiters - [[ ... , ... ]] etc"
+ // on cfe-dev.
+ if (Attr.isMicrosoftAttribute()) // Check for [uuid(...)] spelling.
+ S.Diag(Attr.getLoc(), diag::warn_atl_uuid_deprecated);
+
UuidAttr *UA = S.mergeUuidAttr(D, Attr.getRange(),
Attr.getAttributeSpellingListIndex(), StrRef);
if (UA)
@@ -6157,6 +6188,9 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case AttributeList::AT_ReqdWorkGroupSize:
handleWorkGroupSize<ReqdWorkGroupSizeAttr>(S, D, Attr);
break;
+ case AttributeList::AT_OpenCLIntelReqdSubGroupSize:
+ handleSubGroupSize(S, D, Attr);
+ break;
case AttributeList::AT_VecTypeHint:
handleVecTypeHint(S, D, Attr);
break;
@@ -6521,6 +6555,9 @@ void Sema::ProcessDeclAttributeList(Scope *S, Decl *D,
Diag(D->getLocation(), diag::err_attribute_wrong_decl_type)
<< A << ExpectedKernelFunction;
D->setInvalidDecl();
+ } else if (Attr *A = D->getAttr<OpenCLIntelReqdSubGroupSizeAttr>()) {
+ Diag(D->getLocation(), diag::err_opencl_kernel_attr) << A;
+ D->setInvalidDecl();
}
}
}
@@ -7104,6 +7141,69 @@ void Sema::EmitAvailabilityWarning(AvailabilityResult AR,
namespace {
+/// Returns true if the given statement can be a body-like child of \p Parent.
+bool isBodyLikeChildStmt(const Stmt *S, const Stmt *Parent) {
+ switch (Parent->getStmtClass()) {
+ case Stmt::IfStmtClass:
+ return cast<IfStmt>(Parent)->getThen() == S ||
+ cast<IfStmt>(Parent)->getElse() == S;
+ case Stmt::WhileStmtClass:
+ return cast<WhileStmt>(Parent)->getBody() == S;
+ case Stmt::DoStmtClass:
+ return cast<DoStmt>(Parent)->getBody() == S;
+ case Stmt::ForStmtClass:
+ return cast<ForStmt>(Parent)->getBody() == S;
+ case Stmt::CXXForRangeStmtClass:
+ return cast<CXXForRangeStmt>(Parent)->getBody() == S;
+ case Stmt::ObjCForCollectionStmtClass:
+ return cast<ObjCForCollectionStmt>(Parent)->getBody() == S;
+ case Stmt::CaseStmtClass:
+ case Stmt::DefaultStmtClass:
+ return cast<SwitchCase>(Parent)->getSubStmt() == S;
+ default:
+ return false;
+ }
+}
+
+class StmtUSEFinder : public RecursiveASTVisitor<StmtUSEFinder> {
+ const Stmt *Target;
+
+public:
+ bool VisitStmt(Stmt *S) { return S != Target; }
+
+ /// Returns true if the given statement is present in the given declaration.
+ static bool isContained(const Stmt *Target, const Decl *D) {
+ StmtUSEFinder Visitor;
+ Visitor.Target = Target;
+ return !Visitor.TraverseDecl(const_cast<Decl *>(D));
+ }
+};
+
+/// Traverses the AST and finds the last statement that used a given
+/// declaration.
+class LastDeclUSEFinder : public RecursiveASTVisitor<LastDeclUSEFinder> {
+ const Decl *D;
+
+public:
+ bool VisitDeclRefExpr(DeclRefExpr *DRE) {
+ if (DRE->getDecl() == D)
+ return false;
+ return true;
+ }
+
+ static const Stmt *findLastStmtThatUsesDecl(const Decl *D,
+ const CompoundStmt *Scope) {
+ LastDeclUSEFinder Visitor;
+ Visitor.D = D;
+ for (auto I = Scope->body_rbegin(), E = Scope->body_rend(); I != E; ++I) {
+ const Stmt *S = *I;
+ if (!Visitor.TraverseStmt(const_cast<Stmt *>(S)))
+ return S;
+ }
+ return nullptr;
+ }
+};
+
/// \brief This class implements -Wunguarded-availability.
///
/// This is done with a traversal of the AST of a function that makes reference
@@ -7119,6 +7219,7 @@ class DiagnoseUnguardedAvailability
/// Stack of potentially nested 'if (@available(...))'s.
SmallVector<VersionTuple, 8> AvailabilityStack;
+ SmallVector<const Stmt *, 16> StmtStack;
void DiagnoseDeclAvailability(NamedDecl *D, SourceRange Range);
@@ -7129,6 +7230,15 @@ public:
SemaRef.Context.getTargetInfo().getPlatformMinVersion());
}
+ bool TraverseStmt(Stmt *S) {
+ if (!S)
+ return true;
+ StmtStack.push_back(S);
+ bool Result = Base::TraverseStmt(S);
+ StmtStack.pop_back();
+ return Result;
+ }
+
void IssueDiagnostics(Stmt *S) { TraverseStmt(S); }
bool TraverseIfStmt(IfStmt *If);
@@ -7186,9 +7296,73 @@ void DiagnoseUnguardedAvailability::DiagnoseDeclAvailability(
SemaRef.Diag(D->getLocation(), diag::note_availability_specified_here)
<< D << /* partial */ 3;
- // FIXME: Replace this with a fixit diagnostic.
- SemaRef.Diag(Range.getBegin(), diag::note_unguarded_available_silence)
- << Range << D;
+ auto FixitDiag =
+ SemaRef.Diag(Range.getBegin(), diag::note_unguarded_available_silence)
+ << Range << D
+ << (SemaRef.getLangOpts().ObjC1 ? /*@available*/ 0
+ : /*__builtin_available*/ 1);
+
+ // Find the statement which should be enclosed in the if @available check.
+ if (StmtStack.empty())
+ return;
+ const Stmt *StmtOfUse = StmtStack.back();
+ const CompoundStmt *Scope = nullptr;
+ for (const Stmt *S : llvm::reverse(StmtStack)) {
+ if (const auto *CS = dyn_cast<CompoundStmt>(S)) {
+ Scope = CS;
+ break;
+ }
+ if (isBodyLikeChildStmt(StmtOfUse, S)) {
+ // The declaration won't be seen outside of the statement, so we don't
+ // have to wrap the uses of any declared variables in if (@available).
+ // Therefore we can avoid setting Scope here.
+ break;
+ }
+ StmtOfUse = S;
+ }
+ const Stmt *LastStmtOfUse = nullptr;
+ if (isa<DeclStmt>(StmtOfUse) && Scope) {
+ for (const Decl *D : cast<DeclStmt>(StmtOfUse)->decls()) {
+ if (StmtUSEFinder::isContained(StmtStack.back(), D)) {
+ LastStmtOfUse = LastDeclUSEFinder::findLastStmtThatUsesDecl(D, Scope);
+ break;
+ }
+ }
+ }
+
+ const SourceManager &SM = SemaRef.getSourceManager();
+ SourceLocation IfInsertionLoc =
+ SM.getExpansionLoc(StmtOfUse->getLocStart());
+ SourceLocation StmtEndLoc =
+ SM.getExpansionRange(
+ (LastStmtOfUse ? LastStmtOfUse : StmtOfUse)->getLocEnd())
+ .second;
+ if (SM.getFileID(IfInsertionLoc) != SM.getFileID(StmtEndLoc))
+ return;
+
+ StringRef Indentation = Lexer::getIndentationForLine(IfInsertionLoc, SM);
+ const char *ExtraIndentation = " ";
+ std::string FixItString;
+ llvm::raw_string_ostream FixItOS(FixItString);
+ FixItOS << "if (" << (SemaRef.getLangOpts().ObjC1 ? "@available"
+ : "__builtin_available")
+ << "(" << SemaRef.getASTContext().getTargetInfo().getPlatformName()
+ << " " << Introduced.getAsString() << ", *)) {\n"
+ << Indentation << ExtraIndentation;
+ FixitDiag << FixItHint::CreateInsertion(IfInsertionLoc, FixItOS.str());
+ SourceLocation ElseInsertionLoc = Lexer::findLocationAfterToken(
+ StmtEndLoc, tok::semi, SM, SemaRef.getLangOpts(),
+ /*SkipTrailingWhitespaceAndNewLine=*/false);
+ if (ElseInsertionLoc.isInvalid())
+ ElseInsertionLoc =
+ Lexer::getLocForEndOfToken(StmtEndLoc, 0, SM, SemaRef.getLangOpts());
+ FixItOS.str().clear();
+ FixItOS << "\n"
+ << Indentation << "} else {\n"
+ << Indentation << ExtraIndentation
+ << "// Fallback on earlier versions\n"
+ << Indentation << "}";
+ FixitDiag << FixItHint::CreateInsertion(ElseInsertionLoc, FixItOS.str());
}
}
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaDeclObjC.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaDeclObjC.cpp
index fe9ba6f1f811..370461c4a24e 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaDeclObjC.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaDeclObjC.cpp
@@ -4347,10 +4347,8 @@ static void checkObjCMethodX86VectorTypes(Sema &SemaRef,
AcceptedInVersion = VersionTuple(/*Major=*/10, /*Minor=*/11);
else
return;
- VersionTuple MethodVersion = Method->getVersionIntroduced();
if (SemaRef.getASTContext().getTargetInfo().getPlatformMinVersion() >=
- AcceptedInVersion &&
- (MethodVersion.empty() || MethodVersion >= AcceptedInVersion))
+ AcceptedInVersion)
return;
SemaRef.Diag(Loc, diag::err_objc_method_unsupported_param_ret_type)
<< T << (Method->getReturnType()->isVectorType() ? /*return value*/ 1
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaExpr.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaExpr.cpp
index d63151ef6759..849e978e2d86 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaExpr.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaExpr.cpp
@@ -5399,9 +5399,11 @@ Sema::BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
// that the callee might not preserve them. This is easy to diagnose here,
// but can be very challenging to debug.
if (auto *Caller = getCurFunctionDecl())
- if (Caller->hasAttr<ARMInterruptAttr>())
- if (!FDecl || !FDecl->hasAttr<ARMInterruptAttr>())
+ if (Caller->hasAttr<ARMInterruptAttr>()) {
+ bool VFP = Context.getTargetInfo().hasFeature("vfp");
+ if (VFP && (!FDecl || !FDecl->hasAttr<ARMInterruptAttr>()))
Diag(Fn->getExprLoc(), diag::warn_arm_interrupt_calling_convention);
+ }
// Promote the function operand.
// We special-case function promotion here because we only allow promoting
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaStmt.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaStmt.cpp
index 9ffc23b5adba..5d7eada28717 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaStmt.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaStmt.cpp
@@ -2828,7 +2828,7 @@ Sema::PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
// [...] If the first overload resolution fails or was not performed, or
// if the type of the first parameter of the selected constructor is not
- // an rvalue reference to the object’s type (possibly cv-qualified),
+ // an rvalue reference to the object's type (possibly cv-qualified),
// overload resolution is performed again, considering the object as an
// lvalue.
if (!RRefType ||
diff --git a/contrib/llvm/tools/clang/lib/Tooling/JSONCompilationDatabase.cpp b/contrib/llvm/tools/clang/lib/Tooling/JSONCompilationDatabase.cpp
index 738e610ed946..f9a230eb63a0 100644
--- a/contrib/llvm/tools/clang/lib/Tooling/JSONCompilationDatabase.cpp
+++ b/contrib/llvm/tools/clang/lib/Tooling/JSONCompilationDatabase.cpp
@@ -146,12 +146,8 @@ class JSONCompilationDatabasePlugin : public CompilationDatabasePlugin {
loadFromDirectory(StringRef Directory, std::string &ErrorMessage) override {
SmallString<1024> JSONDatabasePath(Directory);
llvm::sys::path::append(JSONDatabasePath, "compile_commands.json");
- std::unique_ptr<CompilationDatabase> Database(
- JSONCompilationDatabase::loadFromFile(
- JSONDatabasePath, ErrorMessage, JSONCommandLineSyntax::AutoDetect));
- if (!Database)
- return nullptr;
- return Database;
+ return JSONCompilationDatabase::loadFromFile(
+ JSONDatabasePath, ErrorMessage, JSONCommandLineSyntax::AutoDetect);
}
};
diff --git a/contrib/llvm/tools/lld/CMakeLists.txt b/contrib/llvm/tools/lld/CMakeLists.txt
index 7fcb1a748ffc..e2ab0e35f1ab 100644
--- a/contrib/llvm/tools/lld/CMakeLists.txt
+++ b/contrib/llvm/tools/lld/CMakeLists.txt
@@ -221,3 +221,4 @@ endif()
add_subdirectory(docs)
add_subdirectory(COFF)
add_subdirectory(ELF)
+
diff --git a/contrib/llvm/tools/lld/COFF/Chunks.h b/contrib/llvm/tools/lld/COFF/Chunks.h
index 44d7f31afc67..f7412517765c 100644
--- a/contrib/llvm/tools/lld/COFF/Chunks.h
+++ b/contrib/llvm/tools/lld/COFF/Chunks.h
@@ -201,7 +201,7 @@ private:
// Used for ICF (Identical COMDAT Folding)
void replace(SectionChunk *Other);
- uint32_t Color[2] = {0, 0};
+ uint32_t Class[2] = {0, 0};
// Sym points to a section symbol if this is a COMDAT chunk.
DefinedRegular *Sym = nullptr;
diff --git a/contrib/llvm/tools/lld/COFF/ICF.cpp b/contrib/llvm/tools/lld/COFF/ICF.cpp
index fe59de6efa54..9a43f2bd43f5 100644
--- a/contrib/llvm/tools/lld/COFF/ICF.cpp
+++ b/contrib/llvm/tools/lld/COFF/ICF.cpp
@@ -49,10 +49,10 @@ private:
size_t findBoundary(size_t Begin, size_t End);
- void forEachColorRange(size_t Begin, size_t End,
+ void forEachClassRange(size_t Begin, size_t End,
std::function<void(size_t, size_t)> Fn);
- void forEachColor(std::function<void(size_t, size_t)> Fn);
+ void forEachClass(std::function<void(size_t, size_t)> Fn);
std::vector<SectionChunk *> Chunks;
int Cnt = 0;
@@ -85,7 +85,7 @@ bool ICF::isEligible(SectionChunk *C) {
return C->isCOMDAT() && C->isLive() && Global && Executable && !Writable;
}
-// Split a range into smaller ranges by recoloring sections
+// Split an equivalence class into smaller classes.
void ICF::segregate(size_t Begin, size_t End, bool Constant) {
while (Begin < End) {
// Divide [Begin, End) into two. Let Mid be the start index of the
@@ -101,7 +101,7 @@ void ICF::segregate(size_t Begin, size_t End, bool Constant) {
// Split [Begin, End) into [Begin, Mid) and [Mid, End).
uint32_t Id = NextId++;
for (size_t I = Begin; I < Mid; ++I)
- Chunks[I]->Color[(Cnt + 1) % 2] = Id;
+ Chunks[I]->Class[(Cnt + 1) % 2] = Id;
// If we created a group, we need to iterate the main loop again.
if (Mid != End)
@@ -130,7 +130,7 @@ bool ICF::equalsConstant(const SectionChunk *A, const SectionChunk *B) {
if (auto *D1 = dyn_cast<DefinedRegular>(B1))
if (auto *D2 = dyn_cast<DefinedRegular>(B2))
return D1->getValue() == D2->getValue() &&
- D1->getChunk()->Color[Cnt % 2] == D2->getChunk()->Color[Cnt % 2];
+ D1->getChunk()->Class[Cnt % 2] == D2->getChunk()->Class[Cnt % 2];
return false;
};
if (!std::equal(A->Relocs.begin(), A->Relocs.end(), B->Relocs.begin(), Eq))
@@ -155,7 +155,7 @@ bool ICF::equalsVariable(const SectionChunk *A, const SectionChunk *B) {
return true;
if (auto *D1 = dyn_cast<DefinedRegular>(B1))
if (auto *D2 = dyn_cast<DefinedRegular>(B2))
- return D1->getChunk()->Color[Cnt % 2] == D2->getChunk()->Color[Cnt % 2];
+ return D1->getChunk()->Class[Cnt % 2] == D2->getChunk()->Class[Cnt % 2];
return false;
};
return std::equal(A->Relocs.begin(), A->Relocs.end(), B->Relocs.begin(), Eq);
@@ -163,12 +163,12 @@ bool ICF::equalsVariable(const SectionChunk *A, const SectionChunk *B) {
size_t ICF::findBoundary(size_t Begin, size_t End) {
for (size_t I = Begin + 1; I < End; ++I)
- if (Chunks[Begin]->Color[Cnt % 2] != Chunks[I]->Color[Cnt % 2])
+ if (Chunks[Begin]->Class[Cnt % 2] != Chunks[I]->Class[Cnt % 2])
return I;
return End;
}
-void ICF::forEachColorRange(size_t Begin, size_t End,
+void ICF::forEachClassRange(size_t Begin, size_t End,
std::function<void(size_t, size_t)> Fn) {
if (Begin > 0)
Begin = findBoundary(Begin - 1, End);
@@ -180,12 +180,12 @@ void ICF::forEachColorRange(size_t Begin, size_t End,
}
}
-// Call Fn on each color group.
-void ICF::forEachColor(std::function<void(size_t, size_t)> Fn) {
+// Call Fn on each class group.
+void ICF::forEachClass(std::function<void(size_t, size_t)> Fn) {
// If the number of sections are too small to use threading,
// call Fn sequentially.
if (Chunks.size() < 1024) {
- forEachColorRange(0, Chunks.size(), Fn);
+ forEachClassRange(0, Chunks.size(), Fn);
return;
}
@@ -193,9 +193,9 @@ void ICF::forEachColor(std::function<void(size_t, size_t)> Fn) {
size_t NumShards = 256;
size_t Step = Chunks.size() / NumShards;
parallel_for(size_t(0), NumShards, [&](size_t I) {
- forEachColorRange(I * Step, (I + 1) * Step, Fn);
+ forEachClassRange(I * Step, (I + 1) * Step, Fn);
});
- forEachColorRange(Step * NumShards, Chunks.size(), Fn);
+ forEachClassRange(Step * NumShards, Chunks.size(), Fn);
}
// Merge identical COMDAT sections.
@@ -209,11 +209,11 @@ void ICF::run(const std::vector<Chunk *> &Vec) {
continue;
if (isEligible(SC)) {
- // Set MSB to 1 to avoid collisions with non-hash colors.
- SC->Color[0] = getHash(SC) | (1 << 31);
+ // Set MSB to 1 to avoid collisions with non-hash classs.
+ SC->Class[0] = getHash(SC) | (1 << 31);
Chunks.push_back(SC);
} else {
- SC->Color[0] = NextId++;
+ SC->Class[0] = NextId++;
}
}
@@ -224,25 +224,25 @@ void ICF::run(const std::vector<Chunk *> &Vec) {
// the same group are consecutive in the vector.
std::stable_sort(Chunks.begin(), Chunks.end(),
[](SectionChunk *A, SectionChunk *B) {
- return A->Color[0] < B->Color[0];
+ return A->Class[0] < B->Class[0];
});
// Compare static contents and assign unique IDs for each static content.
- forEachColor([&](size_t Begin, size_t End) { segregate(Begin, End, true); });
+ forEachClass([&](size_t Begin, size_t End) { segregate(Begin, End, true); });
++Cnt;
// Split groups by comparing relocations until convergence is obtained.
do {
Repeat = false;
- forEachColor(
+ forEachClass(
[&](size_t Begin, size_t End) { segregate(Begin, End, false); });
++Cnt;
} while (Repeat);
log("ICF needed " + Twine(Cnt) + " iterations");
- // Merge sections in the same colors.
- forEachColor([&](size_t Begin, size_t End) {
+ // Merge sections in the same classs.
+ forEachClass([&](size_t Begin, size_t End) {
if (End - Begin == 1)
return;
diff --git a/contrib/llvm/tools/lld/COFF/PDB.cpp b/contrib/llvm/tools/lld/COFF/PDB.cpp
index 20411a703e24..61b0c64de3a8 100644
--- a/contrib/llvm/tools/lld/COFF/PDB.cpp
+++ b/contrib/llvm/tools/lld/COFF/PDB.cpp
@@ -133,7 +133,7 @@ static void dumpDebugT(ScopedPrinter &W, ObjectFile *File) {
if (Data.empty())
return;
- TypeDatabase TDB;
+ TypeDatabase TDB(0);
TypeDumpVisitor TDV(TDB, &W, false);
// Use a default implementation that does not follow type servers and instead
// just dumps the contents of the TypeServer2 record.
@@ -154,7 +154,7 @@ static void dumpDebugS(ScopedPrinter &W, ObjectFile *File) {
if (auto EC = Reader.readArray(Symbols, Reader.getLength()))
fatal(EC, "StreamReader.readArray<CVSymbolArray> failed");
- TypeDatabase TDB;
+ TypeDatabase TDB(0);
CVSymbolDumper SymbolDumper(W, TDB, nullptr, false);
if (auto EC = SymbolDumper.dump(Symbols))
fatal(EC, "CVSymbolDumper::dump failed");
diff --git a/contrib/llvm/tools/lld/ELF/Config.h b/contrib/llvm/tools/lld/ELF/Config.h
index 1ace4aa26fdb..0321c84e7106 100644
--- a/contrib/llvm/tools/lld/ELF/Config.h
+++ b/contrib/llvm/tools/lld/ELF/Config.h
@@ -73,6 +73,7 @@ struct VersionDefinition {
// Most fields are initialized by the driver.
struct Configuration {
InputFile *FirstElf = nullptr;
+ bool HasStaticTlsModel = false;
uint8_t OSABI = 0;
llvm::CachePruningPolicy ThinLTOCachePolicy;
llvm::StringMap<uint64_t> SectionStartMap;
@@ -99,7 +100,6 @@ struct Configuration {
std::vector<SymbolVersion> VersionScriptLocals;
std::vector<uint8_t> BuildIdVector;
bool AllowMultipleDefinition;
- bool ArchiveWithoutSymbolsSeen = false;
bool AsNeeded = false;
bool Bsymbolic;
bool BsymbolicFunctions;
diff --git a/contrib/llvm/tools/lld/ELF/Driver.cpp b/contrib/llvm/tools/lld/ELF/Driver.cpp
index 6a71eb3ee490..c2cfe3c4129e 100644
--- a/contrib/llvm/tools/lld/ELF/Driver.cpp
+++ b/contrib/llvm/tools/lld/ELF/Driver.cpp
@@ -123,13 +123,13 @@ static std::tuple<ELFKind, uint16_t, uint8_t> parseEmulation(StringRef Emul) {
// Returns slices of MB by parsing MB as an archive file.
// Each slice consists of a member file in the archive.
-std::vector<MemoryBufferRef>
-static getArchiveMembers(MemoryBufferRef MB) {
+std::vector<std::pair<MemoryBufferRef, uint64_t>> static getArchiveMembers(
+ MemoryBufferRef MB) {
std::unique_ptr<Archive> File =
check(Archive::create(MB),
MB.getBufferIdentifier() + ": failed to parse archive");
- std::vector<MemoryBufferRef> V;
+ std::vector<std::pair<MemoryBufferRef, uint64_t>> V;
Error Err = Error::success();
for (const ErrorOr<Archive::Child> &COrErr : File->children(Err)) {
Archive::Child C =
@@ -139,7 +139,7 @@ static getArchiveMembers(MemoryBufferRef MB) {
check(C.getMemoryBufferRef(),
MB.getBufferIdentifier() +
": could not get the buffer for a child of the archive");
- V.push_back(MBRef);
+ V.push_back(std::make_pair(MBRef, C.getChildOffset()));
}
if (Err)
fatal(MB.getBufferIdentifier() + ": Archive::children failed: " +
@@ -152,8 +152,7 @@ static getArchiveMembers(MemoryBufferRef MB) {
return V;
}
-// Opens and parses a file. Path has to be resolved already.
-// Newly created memory buffers are owned by this driver.
+// Opens a file and create a file object. Path has to be resolved already.
void LinkerDriver::addFile(StringRef Path, bool WithLOption) {
using namespace sys::fs;
@@ -171,14 +170,31 @@ void LinkerDriver::addFile(StringRef Path, bool WithLOption) {
case file_magic::unknown:
readLinkerScript(MBRef);
return;
- case file_magic::archive:
+ case file_magic::archive: {
+ // Handle -whole-archive.
if (InWholeArchive) {
- for (MemoryBufferRef MB : getArchiveMembers(MBRef))
- Files.push_back(createObjectFile(MB, Path));
+ for (const auto &P : getArchiveMembers(MBRef))
+ Files.push_back(createObjectFile(P.first, Path, P.second));
return;
}
- Files.push_back(make<ArchiveFile>(MBRef));
+
+ std::unique_ptr<Archive> File =
+ check(Archive::create(MBRef), Path + ": failed to parse archive");
+
+ // If an archive file has no symbol table, it is likely that a user
+ // is attempting LTO and using a default ar command that doesn't
+ // understand the LLVM bitcode file. It is a pretty common error, so
+ // we'll handle it as if it had a symbol table.
+ if (!File->hasSymbolTable()) {
+ for (const auto &P : getArchiveMembers(MBRef))
+ Files.push_back(make<LazyObjectFile>(P.first, Path, P.second));
+ return;
+ }
+
+ // Handle the regular case.
+ Files.push_back(make<ArchiveFile>(std::move(File)));
return;
+ }
case file_magic::elf_shared_object:
if (Config->Relocatable) {
error("attempted static link of dynamic object " + Path);
@@ -199,7 +215,7 @@ void LinkerDriver::addFile(StringRef Path, bool WithLOption) {
return;
default:
if (InLib)
- Files.push_back(make<LazyObjectFile>(MBRef));
+ Files.push_back(make<LazyObjectFile>(MBRef, "", 0));
else
Files.push_back(createObjectFile(MBRef));
}
diff --git a/contrib/llvm/tools/lld/ELF/InputFiles.cpp b/contrib/llvm/tools/lld/ELF/InputFiles.cpp
index 260a78ebbf8e..5f94fc9338a4 100644
--- a/contrib/llvm/tools/lld/ELF/InputFiles.cpp
+++ b/contrib/llvm/tools/lld/ELF/InputFiles.cpp
@@ -596,17 +596,13 @@ SymbolBody *elf::ObjectFile<ELFT>::createSymbolBody(const Elf_Sym *Sym) {
}
}
-template <class ELFT> void ArchiveFile::parse() {
- File = check(Archive::create(MB),
- MB.getBufferIdentifier() + ": failed to parse archive");
+ArchiveFile::ArchiveFile(std::unique_ptr<Archive> &&File)
+ : InputFile(ArchiveKind, File->getMemoryBufferRef()),
+ File(std::move(File)) {}
- // Read the symbol table to construct Lazy objects.
- for (const Archive::Symbol &Sym : File->symbols()) {
+template <class ELFT> void ArchiveFile::parse() {
+ for (const Archive::Symbol &Sym : File->symbols())
Symtab<ELFT>::X->addLazyArchive(this, Sym);
- }
-
- if (File->symbols().begin() == File->symbols().end())
- Config->ArchiveWithoutSymbolsSeen = true;
}
// Returns a buffer pointing to a member file containing a given symbol.
@@ -981,6 +977,13 @@ MemoryBufferRef LazyObjectFile::getBuffer() {
return MB;
}
+InputFile *LazyObjectFile::fetch() {
+ MemoryBufferRef MBRef = getBuffer();
+ if (MBRef.getBuffer().empty())
+ return nullptr;
+ return createObjectFile(MBRef, ArchiveName, OffsetInArchive);
+}
+
template <class ELFT> void LazyObjectFile::parse() {
for (StringRef Sym : getSymbols())
Symtab<ELFT>::X->addLazyObject(Sym, *this);
diff --git a/contrib/llvm/tools/lld/ELF/InputFiles.h b/contrib/llvm/tools/lld/ELF/InputFiles.h
index d0a45a4a98cf..6daf26649859 100644
--- a/contrib/llvm/tools/lld/ELF/InputFiles.h
+++ b/contrib/llvm/tools/lld/ELF/InputFiles.h
@@ -219,7 +219,11 @@ private:
// archive file semantics.
class LazyObjectFile : public InputFile {
public:
- explicit LazyObjectFile(MemoryBufferRef M) : InputFile(LazyObjectKind, M) {}
+ LazyObjectFile(MemoryBufferRef M, StringRef ArchiveName,
+ uint64_t OffsetInArchive)
+ : InputFile(LazyObjectKind, M), OffsetInArchive(OffsetInArchive) {
+ this->ArchiveName = ArchiveName;
+ }
static bool classof(const InputFile *F) {
return F->kind() == LazyObjectKind;
@@ -227,6 +231,7 @@ public:
template <class ELFT> void parse();
MemoryBufferRef getBuffer();
+ InputFile *fetch();
private:
std::vector<StringRef> getSymbols();
@@ -234,12 +239,13 @@ private:
std::vector<StringRef> getBitcodeSymbols();
bool Seen = false;
+ uint64_t OffsetInArchive;
};
// An ArchiveFile object represents a .a file.
class ArchiveFile : public InputFile {
public:
- explicit ArchiveFile(MemoryBufferRef M) : InputFile(ArchiveKind, M) {}
+ explicit ArchiveFile(std::unique_ptr<Archive> &&File);
static bool classof(const InputFile *F) { return F->kind() == ArchiveKind; }
template <class ELFT> void parse();
diff --git a/contrib/llvm/tools/lld/ELF/LinkerScript.cpp b/contrib/llvm/tools/lld/ELF/LinkerScript.cpp
index 3f872c65897f..d7858e173c7b 100644
--- a/contrib/llvm/tools/lld/ELF/LinkerScript.cpp
+++ b/contrib/llvm/tools/lld/ELF/LinkerScript.cpp
@@ -406,27 +406,22 @@ void LinkerScript::processCommands(OutputSectionFactory &Factory) {
}
// Add input sections to an output section.
- unsigned Pos = 0;
- for (InputSectionBase *S : V) {
- // The actual offset will be computed during
- // assignAddresses. For now, use the index as a very crude
- // approximation so that it is at least easy for other code to
- // know the section order.
- cast<InputSection>(S)->OutSecOff = Pos++;
+ for (InputSectionBase *S : V)
Factory.addInputSec(S, Cmd->Name, Cmd->Sec);
+ if (OutputSection *Sec = Cmd->Sec) {
+ assert(Sec->SectionIndex == INT_MAX);
+ Sec->SectionIndex = I;
}
}
}
CurOutSec = nullptr;
}
-void LinkerScript::fabricateDefaultCommands(bool AllocateHeader) {
+void LinkerScript::fabricateDefaultCommands() {
std::vector<BaseCommand *> Commands;
// Define start address
- uint64_t StartAddr = Config->ImageBase;
- if (AllocateHeader)
- StartAddr += elf::getHeaderSize();
+ uint64_t StartAddr = Config->ImageBase + elf::getHeaderSize();
// The Sections with -T<section> have been sorted in order of ascending
// address. We must lower StartAddr if the lowest -T<section address> as
@@ -488,6 +483,11 @@ void LinkerScript::addOrphanSections(OutputSectionFactory &Factory) {
} else {
auto *Cmd = cast<OutputSectionCommand>(*I);
Factory.addInputSec(S, Name, Cmd->Sec);
+ if (OutputSection *Sec = Cmd->Sec) {
+ unsigned Index = std::distance(Opt.Commands.begin(), I);
+ assert(Sec->SectionIndex == INT_MAX || Sec->SectionIndex == Index);
+ Sec->SectionIndex = Index;
+ }
auto *ISD = make<InputSectionDescription>("");
ISD->Sections.push_back(S);
Cmd->Commands.push_back(ISD);
@@ -495,17 +495,22 @@ void LinkerScript::addOrphanSections(OutputSectionFactory &Factory) {
}
}
-static bool isTbss(OutputSection *Sec) {
- return (Sec->Flags & SHF_TLS) && Sec->Type == SHT_NOBITS;
+uint64_t LinkerScript::advance(uint64_t Size, unsigned Align) {
+ bool IsTbss = (CurOutSec->Flags & SHF_TLS) && CurOutSec->Type == SHT_NOBITS;
+ uint64_t Start = IsTbss ? Dot + ThreadBssOffset : Dot;
+ Start = alignTo(Start, Align);
+ uint64_t End = Start + Size;
+
+ if (IsTbss)
+ ThreadBssOffset = End - Dot;
+ else
+ Dot = End;
+ return End;
}
void LinkerScript::output(InputSection *S) {
- bool IsTbss = isTbss(CurOutSec);
-
- uint64_t Pos = IsTbss ? Dot + ThreadBssOffset : Dot;
- Pos = alignTo(Pos, S->Alignment);
- S->OutSecOff = Pos - CurOutSec->Addr;
- Pos += S->getSize();
+ uint64_t Pos = advance(S->getSize(), S->Alignment);
+ S->OutSecOff = Pos - S->getSize() - CurOutSec->Addr;
// Update output section size after adding each section. This is so that
// SIZEOF works correctly in the case below:
@@ -524,11 +529,6 @@ void LinkerScript::output(InputSection *S) {
" bytes");
}
}
-
- if (IsTbss)
- ThreadBssOffset = Pos - Dot;
- else
- Dot = Pos;
}
void LinkerScript::switchTo(OutputSection *Sec) {
@@ -536,9 +536,7 @@ void LinkerScript::switchTo(OutputSection *Sec) {
return;
CurOutSec = Sec;
-
- Dot = alignTo(Dot, CurOutSec->Alignment);
- CurOutSec->Addr = isTbss(CurOutSec) ? Dot + ThreadBssOffset : Dot;
+ CurOutSec->Addr = advance(0, CurOutSec->Alignment);
// If neither AT nor AT> is specified for an allocatable section, the linker
// will set the LMA such that the difference between VMA and LMA for the
@@ -643,6 +641,11 @@ void LinkerScript::assignOffsets(OutputSectionCommand *Cmd) {
Dot = CurMemRegion->Offset;
switchTo(Sec);
+ // We do not support custom layout for compressed debug sectons.
+ // At this point we already know their size and have compressed content.
+ if (CurOutSec->Flags & SHF_COMPRESSED)
+ return;
+
for (BaseCommand *C : Cmd->Commands)
process(*C);
}
@@ -678,8 +681,9 @@ void LinkerScript::adjustSectionsBeforeSorting() {
// consequeces and gives us a section to put the symbol in.
uint64_t Flags = SHF_ALLOC;
uint32_t Type = SHT_PROGBITS;
- for (BaseCommand *Base : Opt.Commands) {
- auto *Cmd = dyn_cast<OutputSectionCommand>(Base);
+
+ for (int I = 0, E = Opt.Commands.size(); I != E; ++I) {
+ auto *Cmd = dyn_cast<OutputSectionCommand>(Opt.Commands[I]);
if (!Cmd)
continue;
if (OutputSection *Sec = Cmd->Sec) {
@@ -692,6 +696,7 @@ void LinkerScript::adjustSectionsBeforeSorting() {
continue;
auto *OutSec = make<OutputSection>(Cmd->Name, Type, Flags);
+ OutSec->SectionIndex = I;
OutputSections->push_back(OutSec);
Cmd->Sec = OutSec;
}
@@ -894,6 +899,48 @@ void LinkerScript::synchronize() {
}
}
+static bool allocateHeaders(std::vector<PhdrEntry> &Phdrs,
+ ArrayRef<OutputSection *> OutputSections,
+ uint64_t Min) {
+ auto FirstPTLoad =
+ std::find_if(Phdrs.begin(), Phdrs.end(),
+ [](const PhdrEntry &E) { return E.p_type == PT_LOAD; });
+ if (FirstPTLoad == Phdrs.end())
+ return false;
+
+ uint64_t HeaderSize = getHeaderSize();
+ if (HeaderSize <= Min || Script->hasPhdrsCommands()) {
+ Min = alignDown(Min - HeaderSize, Config->MaxPageSize);
+ Out::ElfHeader->Addr = Min;
+ Out::ProgramHeaders->Addr = Min + Out::ElfHeader->Size;
+ return true;
+ }
+
+ assert(FirstPTLoad->First == Out::ElfHeader);
+ OutputSection *ActualFirst = nullptr;
+ for (OutputSection *Sec : OutputSections) {
+ if (Sec->FirstInPtLoad == Out::ElfHeader) {
+ ActualFirst = Sec;
+ break;
+ }
+ }
+ if (ActualFirst) {
+ for (OutputSection *Sec : OutputSections)
+ if (Sec->FirstInPtLoad == Out::ElfHeader)
+ Sec->FirstInPtLoad = ActualFirst;
+ FirstPTLoad->First = ActualFirst;
+ } else {
+ Phdrs.erase(FirstPTLoad);
+ }
+
+ auto PhdrI = std::find_if(Phdrs.begin(), Phdrs.end(), [](const PhdrEntry &E) {
+ return E.p_type == PT_PHDR;
+ });
+ if (PhdrI != Phdrs.end())
+ Phdrs.erase(PhdrI);
+ return false;
+}
+
void LinkerScript::assignAddresses(std::vector<PhdrEntry> &Phdrs) {
// Assign addresses as instructed by linker script SECTIONS sub-commands.
Dot = 0;
@@ -994,12 +1041,17 @@ static void writeInt(uint8_t *Buf, uint64_t Data, uint64_t Size) {
llvm_unreachable("unsupported Size argument");
}
-void LinkerScript::writeDataBytes(StringRef Name, uint8_t *Buf) {
- int I = getSectionIndex(Name);
- if (I == INT_MAX)
+void LinkerScript::writeDataBytes(OutputSection *Sec, uint8_t *Buf) {
+ auto I = std::find_if(Opt.Commands.begin(), Opt.Commands.end(),
+ [=](BaseCommand *Base) {
+ if (auto *Cmd = dyn_cast<OutputSectionCommand>(Base))
+ if (Cmd->Sec == Sec)
+ return true;
+ return false;
+ });
+ if (I == Opt.Commands.end())
return;
-
- auto *Cmd = dyn_cast<OutputSectionCommand>(Opt.Commands[I]);
+ auto *Cmd = cast<OutputSectionCommand>(*I);
for (BaseCommand *Base : Cmd->Commands)
if (auto *Data = dyn_cast<BytesDataCommand>(Base))
writeInt(Buf + Data->Offset, Data->Expression().getValue(), Data->Size);
@@ -1013,18 +1065,6 @@ bool LinkerScript::hasLMA(StringRef Name) {
return false;
}
-// Returns the index of the given section name in linker script
-// SECTIONS commands. Sections are laid out as the same order as they
-// were in the script. If a given name did not appear in the script,
-// it returns INT_MAX, so that it will be laid out at end of file.
-int LinkerScript::getSectionIndex(StringRef Name) {
- for (int I = 0, E = Opt.Commands.size(); I != E; ++I)
- if (auto *Cmd = dyn_cast<OutputSectionCommand>(Opt.Commands[I]))
- if (Cmd->Name == Name)
- return I;
- return INT_MAX;
-}
-
ExprValue LinkerScript::getSymbolValue(const Twine &Loc, StringRef S) {
if (S == ".")
return {CurOutSec, Dot - CurOutSec->Addr};
diff --git a/contrib/llvm/tools/lld/ELF/LinkerScript.h b/contrib/llvm/tools/lld/ELF/LinkerScript.h
index dd96d335a660..7bcd21c87602 100644
--- a/contrib/llvm/tools/lld/ELF/LinkerScript.h
+++ b/contrib/llvm/tools/lld/ELF/LinkerScript.h
@@ -228,6 +228,7 @@ protected:
MemoryRegion *findMemoryRegion(OutputSectionCommand *Cmd);
void switchTo(OutputSection *Sec);
+ uint64_t advance(uint64_t Size, unsigned Align);
void output(InputSection *Sec);
void process(BaseCommand &Base);
@@ -252,7 +253,7 @@ public:
bool isDefined(StringRef S);
std::vector<OutputSection *> *OutputSections;
- void fabricateDefaultCommands(bool AllocateHeader);
+ void fabricateDefaultCommands();
void addOrphanSections(OutputSectionFactory &Factory);
void removeEmptyCommands();
void adjustSectionsBeforeSorting();
@@ -269,9 +270,8 @@ public:
void processNonSectionCommands();
void synchronize();
void assignAddresses(std::vector<PhdrEntry> &Phdrs);
- int getSectionIndex(StringRef Name);
- void writeDataBytes(StringRef Name, uint8_t *Buf);
+ void writeDataBytes(OutputSection *Sec, uint8_t *Buf);
void addSymbol(SymbolAssignment *Cmd);
void processCommands(OutputSectionFactory &Factory);
diff --git a/contrib/llvm/tools/lld/ELF/Options.td b/contrib/llvm/tools/lld/ELF/Options.td
index 8863912c179c..65a0e72d2320 100644
--- a/contrib/llvm/tools/lld/ELF/Options.td
+++ b/contrib/llvm/tools/lld/ELF/Options.td
@@ -290,6 +290,7 @@ def alias_L__library_path: J<"library-path=">, Alias<L>;
def alias_define_common_d: Flag<["-"], "d">, Alias<define_common>;
def alias_define_common_dc: F<"dc">, Alias<define_common>;
def alias_define_common_dp: F<"dp">, Alias<define_common>;
+def alias_defsym: S<"defsym">, Alias<defsym>;
def alias_discard_all_x: Flag<["-"], "x">, Alias<discard_all>;
def alias_discard_locals_X: Flag<["-"], "X">, Alias<discard_locals>;
def alias_dynamic_list: J<"dynamic-list=">, Alias<dynamic_list>;
diff --git a/contrib/llvm/tools/lld/ELF/OutputSections.cpp b/contrib/llvm/tools/lld/ELF/OutputSections.cpp
index 839f68f2da55..cb9c57657af3 100644
--- a/contrib/llvm/tools/lld/ELF/OutputSections.cpp
+++ b/contrib/llvm/tools/lld/ELF/OutputSections.cpp
@@ -68,7 +68,8 @@ void OutputSection::writeHeaderTo(typename ELFT::Shdr *Shdr) {
OutputSection::OutputSection(StringRef Name, uint32_t Type, uint64_t Flags)
: SectionBase(Output, Name, Flags, /*Entsize*/ 0, /*Alignment*/ 1, Type,
/*Info*/ 0,
- /*Link*/ 0) {}
+ /*Link*/ 0),
+ SectionIndex(INT_MAX) {}
static bool compareByFilePosition(InputSection *A, InputSection *B) {
// Synthetic doesn't have link order dependecy, stable_sort will keep it last
@@ -139,12 +140,24 @@ template <class ELFT> void OutputSection::finalize() {
this->Info = S->OutSec->SectionIndex;
}
+static uint64_t updateOffset(uint64_t Off, InputSection *S) {
+ Off = alignTo(Off, S->Alignment);
+ S->OutSecOff = Off;
+ return Off + S->getSize();
+}
+
void OutputSection::addSection(InputSection *S) {
assert(S->Live);
Sections.push_back(S);
S->OutSec = this;
this->updateAlignment(S->Alignment);
+ // The actual offsets will be computed by assignAddresses. For now, use
+ // crude approximation so that it is at least easy for other code to know the
+ // section order. It is also used to calculate the output section size early
+ // for compressed debug sections.
+ this->Size = updateOffset(Size, S);
+
// If this section contains a table of fixed-size entries, sh_entsize
// holds the element size. Consequently, if this contains two or more
// input sections, all of them must have the same sh_entsize. However,
@@ -159,11 +172,8 @@ void OutputSection::addSection(InputSection *S) {
// and scan relocations to setup sections' offsets.
void OutputSection::assignOffsets() {
uint64_t Off = 0;
- for (InputSection *S : Sections) {
- Off = alignTo(Off, S->Alignment);
- S->OutSecOff = Off;
- Off += S->getSize();
- }
+ for (InputSection *S : Sections)
+ Off = updateOffset(Off, S);
this->Size = Off;
}
@@ -305,7 +315,7 @@ template <class ELFT> void OutputSection::writeTo(uint8_t *Buf) {
// Linker scripts may have BYTE()-family commands with which you
// can write arbitrary bytes to the output. Process them if any.
- Script->writeDataBytes(Name, Buf);
+ Script->writeDataBytes(this, Buf);
}
static uint64_t getOutFlags(InputSectionBase *S) {
diff --git a/contrib/llvm/tools/lld/ELF/Relocations.cpp b/contrib/llvm/tools/lld/ELF/Relocations.cpp
index baef0a2f2257..f5db931e9755 100644
--- a/contrib/llvm/tools/lld/ELF/Relocations.cpp
+++ b/contrib/llvm/tools/lld/ELF/Relocations.cpp
@@ -233,7 +233,7 @@ handleTlsRelocation(uint32_t Type, SymbolBody &Body, InputSectionBase &C,
}
// Local-Dynamic relocs can be relaxed to Local-Exec.
- if (Target->isTlsLocalDynamicRel(Type) && !Config->Shared) {
+ if (isRelExprOneOf<R_ABS, R_TLSLD, R_TLSLD_PC>(Expr) && !Config->Shared) {
C.Relocations.push_back(
{R_RELAX_TLS_LD_TO_LE, Type, Offset, Addend, &Body});
return 1;
@@ -282,7 +282,8 @@ handleTlsRelocation(uint32_t Type, SymbolBody &Body, InputSectionBase &C,
// Initial-Exec relocs can be relaxed to Local-Exec if the symbol is locally
// defined.
- if (Target->isTlsInitialExecRel(Type) && !Config->Shared && !IsPreemptible) {
+ if (isRelExprOneOf<R_GOT, R_GOT_FROM_END, R_GOT_PC, R_GOT_PAGE_PC>(Expr) &&
+ !Config->Shared && !IsPreemptible) {
C.Relocations.push_back(
{R_RELAX_TLS_IE_TO_LE, Type, Offset, Addend, &Body});
return 1;
@@ -694,17 +695,6 @@ static void reportUndefined(SymbolBody &Sym, InputSectionBase &S,
warn(Msg);
} else {
error(Msg);
-
- if (Config->ArchiveWithoutSymbolsSeen) {
- message("At least one archive listed no symbols in its index."
- " This can happen when creating archives with a version"
- " of ar that does not understand the object files in"
- " the archive. For example, if you are using LLVM"
- " bitcode objects (such as created by -flto), you may"
- " need to use llvm-ar or GNU ar with a plugin.");
- // Reset to false so that we print the message only once.
- Config->ArchiveWithoutSymbolsSeen = false;
- }
}
}
diff --git a/contrib/llvm/tools/lld/ELF/SymbolTable.cpp b/contrib/llvm/tools/lld/ELF/SymbolTable.cpp
index 30f1c3653f50..ed8a790c9599 100644
--- a/contrib/llvm/tools/lld/ELF/SymbolTable.cpp
+++ b/contrib/llvm/tools/lld/ELF/SymbolTable.cpp
@@ -540,13 +540,10 @@ void SymbolTable<ELFT>::addLazyObject(StringRef Name, LazyObjectFile &Obj) {
return;
// See comment for addLazyArchive above.
- if (S->isWeak()) {
+ if (S->isWeak())
replaceBody<LazyObject>(S, Name, Obj, S->body()->Type);
- } else {
- MemoryBufferRef MBRef = Obj.getBuffer();
- if (!MBRef.getBuffer().empty())
- addFile(createObjectFile(MBRef));
- }
+ else if (InputFile *F = Obj.fetch())
+ addFile(F);
}
// Process undefined (-u) flags by loading lazy symbols named by those flags.
diff --git a/contrib/llvm/tools/lld/ELF/Symbols.cpp b/contrib/llvm/tools/lld/ELF/Symbols.cpp
index 01caa6daa5ac..2090b33e8cd6 100644
--- a/contrib/llvm/tools/lld/ELF/Symbols.cpp
+++ b/contrib/llvm/tools/lld/ELF/Symbols.cpp
@@ -327,12 +327,7 @@ InputFile *LazyArchive::fetch() {
return createObjectFile(MBInfo.first, file()->getName(), MBInfo.second);
}
-InputFile *LazyObject::fetch() {
- MemoryBufferRef MBRef = file()->getBuffer();
- if (MBRef.getBuffer().empty())
- return nullptr;
- return createObjectFile(MBRef);
-}
+InputFile *LazyObject::fetch() { return file()->fetch(); }
uint8_t Symbol::computeBinding() const {
if (Config->Relocatable)
diff --git a/contrib/llvm/tools/lld/ELF/SyntheticSections.cpp b/contrib/llvm/tools/lld/ELF/SyntheticSections.cpp
index a271d31048f5..9c585e41e9f0 100644
--- a/contrib/llvm/tools/lld/ELF/SyntheticSections.cpp
+++ b/contrib/llvm/tools/lld/ELF/SyntheticSections.cpp
@@ -1038,6 +1038,15 @@ template <class ELFT> void DynamicSection<ELFT>::addEntries() {
if (!Config->SoName.empty())
add({DT_SONAME, In<ELFT>::DynStrTab->addString(Config->SoName)});
+ if (!Config->Shared && !Config->Relocatable)
+ add({DT_DEBUG, (uint64_t)0});
+}
+
+// Add remaining entries to complete .dynamic contents.
+template <class ELFT> void DynamicSection<ELFT>::finalizeContents() {
+ if (this->Size)
+ return; // Already finalized.
+
// Set DT_FLAGS and DT_FLAGS_1.
uint32_t DtFlags = 0;
uint32_t DtFlags1 = 0;
@@ -1055,21 +1064,14 @@ template <class ELFT> void DynamicSection<ELFT>::addEntries() {
DtFlags |= DF_ORIGIN;
DtFlags1 |= DF_1_ORIGIN;
}
+ if (Config->HasStaticTlsModel)
+ DtFlags |= DF_STATIC_TLS;
if (DtFlags)
add({DT_FLAGS, DtFlags});
if (DtFlags1)
add({DT_FLAGS_1, DtFlags1});
- if (!Config->Shared && !Config->Relocatable)
- add({DT_DEBUG, (uint64_t)0});
-}
-
-// Add remaining entries to complete .dynamic contents.
-template <class ELFT> void DynamicSection<ELFT>::finalizeContents() {
- if (this->Size)
- return; // Already finalized.
-
this->Link = In<ELFT>::DynStrTab->OutSec->SectionIndex;
if (In<ELFT>::RelaDyn->OutSec->Size > 0) {
bool IsRela = Config->IsRela;
diff --git a/contrib/llvm/tools/lld/ELF/Target.cpp b/contrib/llvm/tools/lld/ELF/Target.cpp
index 921505ae4b61..4643c1a919aa 100644
--- a/contrib/llvm/tools/lld/ELF/Target.cpp
+++ b/contrib/llvm/tools/lld/ELF/Target.cpp
@@ -124,8 +124,6 @@ public:
int64_t getImplicitAddend(const uint8_t *Buf, uint32_t Type) const override;
void writeGotPltHeader(uint8_t *Buf) const override;
uint32_t getDynRel(uint32_t Type) const override;
- bool isTlsLocalDynamicRel(uint32_t Type) const override;
- bool isTlsInitialExecRel(uint32_t Type) const override;
void writeGotPlt(uint8_t *Buf, const SymbolBody &S) const override;
void writeIgotPlt(uint8_t *Buf, const SymbolBody &S) const override;
void writePltHeader(uint8_t *Buf) const override;
@@ -147,8 +145,6 @@ public:
RelExpr getRelExpr(uint32_t Type, const SymbolBody &S,
const uint8_t *Loc) const override;
bool isPicRel(uint32_t Type) const override;
- bool isTlsLocalDynamicRel(uint32_t Type) const override;
- bool isTlsInitialExecRel(uint32_t Type) const override;
void writeGotPltHeader(uint8_t *Buf) const override;
void writeGotPlt(uint8_t *Buf, const SymbolBody &S) const override;
void writePltHeader(uint8_t *Buf) const override;
@@ -193,7 +189,6 @@ public:
RelExpr getRelExpr(uint32_t Type, const SymbolBody &S,
const uint8_t *Loc) const override;
bool isPicRel(uint32_t Type) const override;
- bool isTlsInitialExecRel(uint32_t Type) const override;
void writeGotPlt(uint8_t *Buf, const SymbolBody &S) const override;
void writePltHeader(uint8_t *Buf) const override;
void writePlt(uint8_t *Buf, uint64_t GotPltEntryAddr, uint64_t PltEntryAddr,
@@ -303,10 +298,6 @@ bool TargetInfo::needsThunk(RelExpr Expr, uint32_t RelocType,
return false;
}
-bool TargetInfo::isTlsInitialExecRel(uint32_t Type) const { return false; }
-
-bool TargetInfo::isTlsLocalDynamicRel(uint32_t Type) const { return false; }
-
void TargetInfo::writeIgotPlt(uint8_t *Buf, const SymbolBody &S) const {
writeGotPlt(Buf, S);
}
@@ -360,6 +351,15 @@ X86TargetInfo::X86TargetInfo() {
RelExpr X86TargetInfo::getRelExpr(uint32_t Type, const SymbolBody &S,
const uint8_t *Loc) const {
+ // There are 4 different TLS variable models with varying degrees of
+ // flexibility and performance. LocalExec and InitialExec models are fast but
+ // less-flexible models. They cannot be used for dlopen(). If they are in use,
+ // we set DF_STATIC_TLS in the ELF header so that the runtime can reject such
+ // DSOs.
+ if (Type == R_386_TLS_LE || Type == R_386_TLS_LE_32 || Type == R_386_TLS_IE ||
+ Type == R_386_TLS_GOTIE)
+ Config->HasStaticTlsModel = true;
+
switch (Type) {
case R_386_8:
case R_386_16:
@@ -451,14 +451,6 @@ uint32_t X86TargetInfo::getDynRel(uint32_t Type) const {
return Type;
}
-bool X86TargetInfo::isTlsLocalDynamicRel(uint32_t Type) const {
- return Type == R_386_TLS_LDO_32 || Type == R_386_TLS_LDM;
-}
-
-bool X86TargetInfo::isTlsInitialExecRel(uint32_t Type) const {
- return Type == R_386_TLS_IE || Type == R_386_TLS_GOTIE;
-}
-
void X86TargetInfo::writePltHeader(uint8_t *Buf) const {
if (Config->Pic) {
const uint8_t V[] = {
@@ -772,17 +764,6 @@ bool X86_64TargetInfo<ELFT>::isPicRel(uint32_t Type) const {
}
template <class ELFT>
-bool X86_64TargetInfo<ELFT>::isTlsInitialExecRel(uint32_t Type) const {
- return Type == R_X86_64_GOTTPOFF;
-}
-
-template <class ELFT>
-bool X86_64TargetInfo<ELFT>::isTlsLocalDynamicRel(uint32_t Type) const {
- return Type == R_X86_64_DTPOFF32 || Type == R_X86_64_DTPOFF64 ||
- Type == R_X86_64_TLSLD;
-}
-
-template <class ELFT>
void X86_64TargetInfo<ELFT>::relaxTlsGdToLe(uint8_t *Loc, uint32_t Type,
uint64_t Val) const {
// Convert
@@ -1383,11 +1364,6 @@ bool AArch64TargetInfo::usesOnlyLowPageBits(uint32_t Type) const {
}
}
-bool AArch64TargetInfo::isTlsInitialExecRel(uint32_t Type) const {
- return Type == R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 ||
- Type == R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC;
-}
-
bool AArch64TargetInfo::isPicRel(uint32_t Type) const {
return Type == R_AARCH64_ABS32 || Type == R_AARCH64_ABS64;
}
diff --git a/contrib/llvm/tools/lld/ELF/Target.h b/contrib/llvm/tools/lld/ELF/Target.h
index 4b88626050b3..f4f366219d86 100644
--- a/contrib/llvm/tools/lld/ELF/Target.h
+++ b/contrib/llvm/tools/lld/ELF/Target.h
@@ -23,8 +23,6 @@ class SymbolBody;
class TargetInfo {
public:
- virtual bool isTlsInitialExecRel(uint32_t Type) const;
- virtual bool isTlsLocalDynamicRel(uint32_t Type) const;
virtual bool isPicRel(uint32_t Type) const { return true; }
virtual uint32_t getDynRel(uint32_t Type) const { return Type; }
virtual void writeGotPltHeader(uint8_t *Buf) const {}
diff --git a/contrib/llvm/tools/lld/ELF/Writer.cpp b/contrib/llvm/tools/lld/ELF/Writer.cpp
index 326df41b2286..96e4501754cf 100644
--- a/contrib/llvm/tools/lld/ELF/Writer.cpp
+++ b/contrib/llvm/tools/lld/ELF/Writer.cpp
@@ -62,7 +62,6 @@ private:
void assignFileOffsets();
void assignFileOffsetsBinary();
void setPhdrs();
- void fixHeaders();
void fixSectionAlignments();
void fixPredefinedSymbols();
void openFile();
@@ -86,7 +85,6 @@ private:
uint64_t FileSize;
uint64_t SectionHeaderOff;
- bool AllocateHeader = true;
};
} // anonymous namespace
@@ -252,7 +250,7 @@ template <class ELFT> void Writer<ELFT>::run() {
} else {
if (!Script->Opt.HasSections) {
fixSectionAlignments();
- Script->fabricateDefaultCommands(AllocateHeader);
+ Script->fabricateDefaultCommands();
}
Script->synchronize();
Script->assignAddresses(Phdrs);
@@ -747,15 +745,12 @@ static bool compareSectionsNonScript(const OutputSection *A,
// Output section ordering is determined by this function.
template <class ELFT>
static bool compareSections(const OutputSection *A, const OutputSection *B) {
- // For now, put sections mentioned in a linker script first.
- int AIndex = Script->getSectionIndex(A->Name);
- int BIndex = Script->getSectionIndex(B->Name);
- bool AInScript = AIndex != INT_MAX;
- bool BInScript = BIndex != INT_MAX;
- if (AInScript != BInScript)
- return AInScript;
- // If both are in the script, use that order.
- if (AInScript)
+ // For now, put sections mentioned in a linker script
+ // first. Sections not on linker script will have a SectionIndex of
+ // INT_MAX.
+ int AIndex = A->SectionIndex;
+ int BIndex = B->SectionIndex;
+ if (AIndex != BIndex)
return AIndex < BIndex;
return compareSectionsNonScript<ELFT>(A, B);
@@ -1021,9 +1016,8 @@ template <class ELFT> void Writer<ELFT>::sortSections() {
auto I = OutputSections.begin();
auto E = OutputSections.end();
auto NonScriptI =
- std::find_if(OutputSections.begin(), E, [](OutputSection *S) {
- return Script->getSectionIndex(S->Name) == INT_MAX;
- });
+ std::find_if(OutputSections.begin(), E,
+ [](OutputSection *S) { return S->SectionIndex == INT_MAX; });
while (NonScriptI != E) {
auto BestPos = std::max_element(
I, NonScriptI, [&](OutputSection *&A, OutputSection *&B) {
@@ -1176,7 +1170,7 @@ template <class ELFT> void Writer<ELFT>::finalizeSections() {
if (!Config->Relocatable && !Config->OFormatBinary) {
Phdrs = Script->hasPhdrsCommands() ? Script->createPhdrs() : createPhdrs();
addPtArmExid(Phdrs);
- fixHeaders();
+ Out::ProgramHeaders->Size = sizeof(Elf_Phdr) * Phdrs.size();
}
// Dynamic section must be the last one in this list and dynamic
@@ -1321,6 +1315,11 @@ template <class ELFT> std::vector<PhdrEntry> Writer<ELFT>::createPhdrs() {
// Add the first PT_LOAD segment for regular output sections.
uint64_t Flags = computeFlags(PF_R);
PhdrEntry *Load = AddHdr(PT_LOAD, Flags);
+
+ // Add the headers. We will remove them if they don't fit.
+ Load->add(Out::ElfHeader);
+ Load->add(Out::ProgramHeaders);
+
for (OutputSection *Sec : OutputSections) {
if (!(Sec->Flags & SHF_ALLOC))
break;
@@ -1447,64 +1446,6 @@ template <class ELFT> void Writer<ELFT>::fixSectionAlignments() {
}
}
-bool elf::allocateHeaders(std::vector<PhdrEntry> &Phdrs,
- ArrayRef<OutputSection *> OutputSections,
- uint64_t Min) {
- auto FirstPTLoad =
- std::find_if(Phdrs.begin(), Phdrs.end(),
- [](const PhdrEntry &E) { return E.p_type == PT_LOAD; });
- if (FirstPTLoad == Phdrs.end())
- return false;
-
- uint64_t HeaderSize = getHeaderSize();
- if (HeaderSize > Min) {
- auto PhdrI =
- std::find_if(Phdrs.begin(), Phdrs.end(),
- [](const PhdrEntry &E) { return E.p_type == PT_PHDR; });
- if (PhdrI != Phdrs.end())
- Phdrs.erase(PhdrI);
- return false;
- }
- Min = alignDown(Min - HeaderSize, Config->MaxPageSize);
-
- if (!Script->Opt.HasSections)
- Config->ImageBase = Min = std::min(Min, Config->ImageBase);
-
- Out::ElfHeader->Addr = Min;
- Out::ProgramHeaders->Addr = Min + Out::ElfHeader->Size;
-
- if (Script->hasPhdrsCommands())
- return true;
-
- if (FirstPTLoad->First)
- for (OutputSection *Sec : OutputSections)
- if (Sec->FirstInPtLoad == FirstPTLoad->First)
- Sec->FirstInPtLoad = Out::ElfHeader;
- FirstPTLoad->First = Out::ElfHeader;
- if (!FirstPTLoad->Last)
- FirstPTLoad->Last = Out::ProgramHeaders;
- return true;
-}
-
-// We should set file offsets and VAs for elf header and program headers
-// sections. These are special, we do not include them into output sections
-// list, but have them to simplify the code.
-template <class ELFT> void Writer<ELFT>::fixHeaders() {
- Out::ProgramHeaders->Size = sizeof(Elf_Phdr) * Phdrs.size();
- // If the script has SECTIONS, assignAddresses will compute the values.
- if (Script->Opt.HasSections)
- return;
-
- // When -T<section> option is specified, lower the base to make room for those
- // sections.
- uint64_t Min = -1;
- if (!Config->SectionStartMap.empty())
- for (const auto &P : Config->SectionStartMap)
- Min = std::min(Min, P.second);
-
- AllocateHeader = allocateHeaders(Phdrs, OutputSections, Min);
-}
-
// Adjusts the file alignment for a given output section and returns
// its new file offset. The file offset must be the same with its
// virtual address (modulo the page size) so that the loader can load
diff --git a/contrib/llvm/tools/lld/ELF/Writer.h b/contrib/llvm/tools/lld/ELF/Writer.h
index a669e42ef205..8b965f7beddb 100644
--- a/contrib/llvm/tools/lld/ELF/Writer.h
+++ b/contrib/llvm/tools/lld/ELF/Writer.h
@@ -49,9 +49,6 @@ struct PhdrEntry {
llvm::StringRef getOutputSectionName(llvm::StringRef Name);
-bool allocateHeaders(std::vector<PhdrEntry> &, llvm::ArrayRef<OutputSection *>,
- uint64_t Min);
-
template <class ELFT> uint32_t getMipsEFlags();
uint8_t getMipsFpAbiFlag(uint8_t OldFlag, uint8_t NewFlag,
diff --git a/contrib/llvm/tools/lld/include/lld/Core/Parallel.h b/contrib/llvm/tools/lld/include/lld/Core/Parallel.h
index 64b4f2ab04d8..58fa87e85c51 100644
--- a/contrib/llvm/tools/lld/include/lld/Core/Parallel.h
+++ b/contrib/llvm/tools/lld/include/lld/Core/Parallel.h
@@ -10,16 +10,12 @@
#ifndef LLD_CORE_PARALLEL_H
#define LLD_CORE_PARALLEL_H
-#include "lld/Core/Instrumentation.h"
#include "lld/Core/LLVM.h"
+#include "lld/Core/TaskGroup.h"
#include "llvm/Support/MathExtras.h"
-#include "llvm/Support/thread.h"
+#include "llvm/Config/llvm-config.h"
#include <algorithm>
-#include <atomic>
-#include <condition_variable>
-#include <mutex>
-#include <stack>
#if defined(_MSC_VER) && LLVM_ENABLE_THREADS
#include <concrt.h>
@@ -27,249 +23,84 @@
#endif
namespace lld {
-/// \brief Allows one or more threads to wait on a potentially unknown number of
-/// events.
-///
-/// A latch starts at \p count. inc() increments this, and dec() decrements it.
-/// All calls to sync() will block while the count is not 0.
-///
-/// Calling dec() on a Latch with a count of 0 has undefined behaivor.
-class Latch {
- uint32_t _count;
- mutable std::mutex _condMut;
- mutable std::condition_variable _cond;
-public:
- explicit Latch(uint32_t count = 0) : _count(count) {}
- ~Latch() { sync(); }
-
- void inc() {
- std::unique_lock<std::mutex> lock(_condMut);
- ++_count;
- }
-
- void dec() {
- std::unique_lock<std::mutex> lock(_condMut);
- if (--_count == 0)
- _cond.notify_all();
- }
-
- void sync() const {
- std::unique_lock<std::mutex> lock(_condMut);
- _cond.wait(lock, [&] {
- return _count == 0;
- });
- }
-};
-
-// Classes in this namespace are implementation details of this header.
-namespace internal {
-
-/// \brief An abstract class that takes closures and runs them asynchronously.
-class Executor {
-public:
- virtual ~Executor() = default;
- virtual void add(std::function<void()> func) = 0;
-};
-
-#if !defined(LLVM_ENABLE_THREADS) || LLVM_ENABLE_THREADS == 0
-class SyncExecutor : public Executor {
-public:
- virtual void add(std::function<void()> func) {
- func();
- }
-};
-
-inline Executor *getDefaultExecutor() {
- static SyncExecutor exec;
- return &exec;
-}
-#elif defined(_MSC_VER)
-/// \brief An Executor that runs tasks via ConcRT.
-class ConcRTExecutor : public Executor {
- struct Taskish {
- Taskish(std::function<void()> task) : _task(task) {}
-
- std::function<void()> _task;
-
- static void run(void *p) {
- Taskish *self = static_cast<Taskish *>(p);
- self->_task();
- concurrency::Free(self);
- }
- };
-
-public:
- virtual void add(std::function<void()> func) {
- Concurrency::CurrentScheduler::ScheduleTask(Taskish::run,
- new (concurrency::Alloc(sizeof(Taskish))) Taskish(func));
- }
-};
-
-inline Executor *getDefaultExecutor() {
- static ConcRTExecutor exec;
- return &exec;
-}
-#else
-/// \brief An implementation of an Executor that runs closures on a thread pool
-/// in filo order.
-class ThreadPoolExecutor : public Executor {
-public:
- explicit ThreadPoolExecutor(unsigned threadCount =
- std::thread::hardware_concurrency())
- : _stop(false), _done(threadCount) {
- // Spawn all but one of the threads in another thread as spawning threads
- // can take a while.
- std::thread([&, threadCount] {
- for (size_t i = 1; i < threadCount; ++i) {
- std::thread([=] {
- work();
- }).detach();
- }
- work();
- }).detach();
- }
-
- ~ThreadPoolExecutor() override {
- std::unique_lock<std::mutex> lock(_mutex);
- _stop = true;
- lock.unlock();
- _cond.notify_all();
- // Wait for ~Latch.
- }
-
- void add(std::function<void()> f) override {
- std::unique_lock<std::mutex> lock(_mutex);
- _workStack.push(f);
- lock.unlock();
- _cond.notify_one();
- }
-
-private:
- void work() {
- while (true) {
- std::unique_lock<std::mutex> lock(_mutex);
- _cond.wait(lock, [&] {
- return _stop || !_workStack.empty();
- });
- if (_stop)
- break;
- auto task = _workStack.top();
- _workStack.pop();
- lock.unlock();
- task();
- }
- _done.dec();
- }
-
- std::atomic<bool> _stop;
- std::stack<std::function<void()>> _workStack;
- std::mutex _mutex;
- std::condition_variable _cond;
- Latch _done;
-};
-
-inline Executor *getDefaultExecutor() {
- static ThreadPoolExecutor exec;
- return &exec;
-}
-#endif
-
-} // namespace internal
-
-/// \brief Allows launching a number of tasks and waiting for them to finish
-/// either explicitly via sync() or implicitly on destruction.
-class TaskGroup {
- Latch _latch;
-
-public:
- void spawn(std::function<void()> f) {
- _latch.inc();
- internal::getDefaultExecutor()->add([&, f] {
- f();
- _latch.dec();
- });
- }
-
- void sync() const { _latch.sync(); }
-};
-
-#if !defined(LLVM_ENABLE_THREADS) || LLVM_ENABLE_THREADS == 0
-template <class RandomAccessIterator, class Comp>
+#if !LLVM_ENABLE_THREADS
+template <class RandomAccessIterator, class Comparator>
void parallel_sort(
- RandomAccessIterator start, RandomAccessIterator end,
- const Comp &comp = std::less<
+ RandomAccessIterator Start, RandomAccessIterator End,
+ const Comparator &Comp = std::less<
typename std::iterator_traits<RandomAccessIterator>::value_type>()) {
- std::sort(start, end, comp);
+ std::sort(Start, End, Comp);
}
#elif defined(_MSC_VER)
// Use ppl parallel_sort on Windows.
-template <class RandomAccessIterator, class Comp>
+template <class RandomAccessIterator, class Comparator>
void parallel_sort(
- RandomAccessIterator start, RandomAccessIterator end,
- const Comp &comp = std::less<
+ RandomAccessIterator Start, RandomAccessIterator End,
+ const Comparator &Comp = std::less<
typename std::iterator_traits<RandomAccessIterator>::value_type>()) {
- concurrency::parallel_sort(start, end, comp);
+ concurrency::parallel_sort(Start, End, Comp);
}
#else
namespace detail {
-const ptrdiff_t minParallelSize = 1024;
+const ptrdiff_t MinParallelSize = 1024;
/// \brief Inclusive median.
-template <class RandomAccessIterator, class Comp>
-RandomAccessIterator medianOf3(RandomAccessIterator start,
- RandomAccessIterator end, const Comp &comp) {
- RandomAccessIterator mid = start + (std::distance(start, end) / 2);
- return comp(*start, *(end - 1))
- ? (comp(*mid, *(end - 1)) ? (comp(*start, *mid) ? mid : start)
- : end - 1)
- : (comp(*mid, *start) ? (comp(*(end - 1), *mid) ? mid : end - 1)
- : start);
-}
-
-template <class RandomAccessIterator, class Comp>
-void parallel_quick_sort(RandomAccessIterator start, RandomAccessIterator end,
- const Comp &comp, TaskGroup &tg, size_t depth) {
+template <class RandomAccessIterator, class Comparator>
+RandomAccessIterator medianOf3(RandomAccessIterator Start,
+ RandomAccessIterator End,
+ const Comparator &Comp) {
+ RandomAccessIterator Mid = Start + (std::distance(Start, End) / 2);
+ return Comp(*Start, *(End - 1))
+ ? (Comp(*Mid, *(End - 1)) ? (Comp(*Start, *Mid) ? Mid : Start)
+ : End - 1)
+ : (Comp(*Mid, *Start) ? (Comp(*(End - 1), *Mid) ? Mid : End - 1)
+ : Start);
+}
+
+template <class RandomAccessIterator, class Comparator>
+void parallel_quick_sort(RandomAccessIterator Start, RandomAccessIterator End,
+ const Comparator &Comp, TaskGroup &TG, size_t Depth) {
// Do a sequential sort for small inputs.
- if (std::distance(start, end) < detail::minParallelSize || depth == 0) {
- std::sort(start, end, comp);
+ if (std::distance(Start, End) < detail::MinParallelSize || Depth == 0) {
+ std::sort(Start, End, Comp);
return;
}
// Partition.
- auto pivot = medianOf3(start, end, comp);
- // Move pivot to end.
- std::swap(*(end - 1), *pivot);
- pivot = std::partition(start, end - 1, [&comp, end](decltype(*start) v) {
- return comp(v, *(end - 1));
+ auto Pivot = medianOf3(Start, End, Comp);
+ // Move Pivot to End.
+ std::swap(*(End - 1), *Pivot);
+ Pivot = std::partition(Start, End - 1, [&Comp, End](decltype(*Start) V) {
+ return Comp(V, *(End - 1));
});
- // Move pivot to middle of partition.
- std::swap(*pivot, *(end - 1));
+ // Move Pivot to middle of partition.
+ std::swap(*Pivot, *(End - 1));
// Recurse.
- tg.spawn([=, &comp, &tg] {
- parallel_quick_sort(start, pivot, comp, tg, depth - 1);
+ TG.spawn([=, &Comp, &TG] {
+ parallel_quick_sort(Start, Pivot, Comp, TG, Depth - 1);
});
- parallel_quick_sort(pivot + 1, end, comp, tg, depth - 1);
+ parallel_quick_sort(Pivot + 1, End, Comp, TG, Depth - 1);
}
}
-template <class RandomAccessIterator, class Comp>
+template <class RandomAccessIterator, class Comparator>
void parallel_sort(
- RandomAccessIterator start, RandomAccessIterator end,
- const Comp &comp = std::less<
+ RandomAccessIterator Start, RandomAccessIterator End,
+ const Comparator &Comp = std::less<
typename std::iterator_traits<RandomAccessIterator>::value_type>()) {
- TaskGroup tg;
- detail::parallel_quick_sort(start, end, comp, tg,
- llvm::Log2_64(std::distance(start, end)) + 1);
+ TaskGroup TG;
+ detail::parallel_quick_sort(Start, End, Comp, TG,
+ llvm::Log2_64(std::distance(Start, End)) + 1);
}
#endif
-template <class T> void parallel_sort(T *start, T *end) {
- parallel_sort(start, end, std::less<T>());
+template <class T> void parallel_sort(T *Start, T *End) {
+ parallel_sort(Start, End, std::less<T>());
}
-#if !defined(LLVM_ENABLE_THREADS) || LLVM_ENABLE_THREADS == 0
+#if !LLVM_ENABLE_THREADS
template <class IterTy, class FuncTy>
void parallel_for_each(IterTy Begin, IterTy End, FuncTy Fn) {
std::for_each(Begin, End, Fn);
@@ -302,12 +133,12 @@ void parallel_for_each(IterTy Begin, IterTy End, FuncTy Fn) {
if (TaskSize == 0)
TaskSize = 1;
- TaskGroup Tg;
+ TaskGroup TG;
while (TaskSize <= std::distance(Begin, End)) {
- Tg.spawn([=, &Fn] { std::for_each(Begin, Begin + TaskSize, Fn); });
+ TG.spawn([=, &Fn] { std::for_each(Begin, Begin + TaskSize, Fn); });
Begin += TaskSize;
}
- Tg.spawn([=, &Fn] { std::for_each(Begin, End, Fn); });
+ TG.spawn([=, &Fn] { std::for_each(Begin, End, Fn); });
}
template <class IndexTy, class FuncTy>
@@ -316,20 +147,20 @@ void parallel_for(IndexTy Begin, IndexTy End, FuncTy Fn) {
if (TaskSize == 0)
TaskSize = 1;
- TaskGroup Tg;
+ TaskGroup TG;
IndexTy I = Begin;
for (; I + TaskSize < End; I += TaskSize) {
- Tg.spawn([=, &Fn] {
+ TG.spawn([=, &Fn] {
for (IndexTy J = I, E = I + TaskSize; J != E; ++J)
Fn(J);
});
}
- Tg.spawn([=, &Fn] {
+ TG.spawn([=, &Fn] {
for (IndexTy J = I; J < End; ++J)
Fn(J);
});
}
#endif
-} // end namespace lld
+} // End namespace lld
#endif // LLD_CORE_PARALLEL_H
diff --git a/contrib/llvm/tools/lld/include/lld/Core/TaskGroup.h b/contrib/llvm/tools/lld/include/lld/Core/TaskGroup.h
new file mode 100644
index 000000000000..82e9122f4ae2
--- /dev/null
+++ b/contrib/llvm/tools/lld/include/lld/Core/TaskGroup.h
@@ -0,0 +1,65 @@
+//===- lld/Core/TaskGroup.h - Task Group ----------------------------------===//
+//
+// The LLVM Linker
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLD_CORE_TASKGROUP_H
+#define LLD_CORE_TASKGROUP_H
+
+#include "lld/Core/LLVM.h"
+
+#include <condition_variable>
+#include <functional>
+#include <mutex>
+
+namespace lld {
+/// \brief Allows one or more threads to wait on a potentially unknown number of
+/// events.
+///
+/// A latch starts at \p count. inc() increments this, and dec() decrements it.
+/// All calls to sync() will block while the count is not 0.
+///
+/// Calling dec() on a Latch with a count of 0 has undefined behaivor.
+class Latch {
+ uint32_t _count;
+ mutable std::mutex _condMut;
+ mutable std::condition_variable _cond;
+
+public:
+ explicit Latch(uint32_t count = 0) : _count(count) {}
+ ~Latch() { sync(); }
+
+ void inc() {
+ std::unique_lock<std::mutex> lock(_condMut);
+ ++_count;
+ }
+
+ void dec() {
+ std::unique_lock<std::mutex> lock(_condMut);
+ if (--_count == 0)
+ _cond.notify_all();
+ }
+
+ void sync() const {
+ std::unique_lock<std::mutex> lock(_condMut);
+ _cond.wait(lock, [&] { return _count == 0; });
+ }
+};
+
+/// \brief Allows launching a number of tasks and waiting for them to finish
+/// either explicitly via sync() or implicitly on destruction.
+class TaskGroup {
+ Latch _latch;
+
+public:
+ void spawn(std::function<void()> f);
+
+ void sync() const { _latch.sync(); }
+};
+}
+
+#endif
diff --git a/contrib/llvm/tools/lld/include/lld/Support/Memory.h b/contrib/llvm/tools/lld/include/lld/Support/Memory.h
deleted file mode 100644
index 46db4a39f696..000000000000
--- a/contrib/llvm/tools/lld/include/lld/Support/Memory.h
+++ /dev/null
@@ -1,63 +0,0 @@
-//===- Memory.h -------------------------------------------------*- C++ -*-===//
-//
-// The LLVM Linker
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines arena allocators.
-//
-// Almost all large objects, such as files, sections or symbols, are
-// used for the entire lifetime of the linker once they are created.
-// This usage characteristic makes arena allocator an attractive choice
-// where the entire linker is one arena. With an arena, newly created
-// objects belong to the arena and freed all at once when everything is done.
-// Arena allocators are efficient and easy to understand.
-// Most objects are allocated using the arena allocators defined by this file.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLD_MEMORY_H
-#define LLD_MEMORY_H
-
-#include "llvm/Support/Allocator.h"
-#include "llvm/Support/StringSaver.h"
-#include <vector>
-
-namespace lld {
-
-// Use this arena if your object doesn't have a destructor.
-extern llvm::BumpPtrAllocator BAlloc;
-extern llvm::StringSaver Saver;
-
-// These two classes are hack to keep track of all
-// SpecificBumpPtrAllocator instances.
-struct SpecificAllocBase {
- SpecificAllocBase() { Instances.push_back(this); }
- virtual ~SpecificAllocBase() = default;
- virtual void reset() = 0;
- static std::vector<SpecificAllocBase *> Instances;
-};
-
-template <class T> struct SpecificAlloc : public SpecificAllocBase {
- void reset() override { Alloc.DestroyAll(); }
- llvm::SpecificBumpPtrAllocator<T> Alloc;
-};
-
-// Use this arena if your object has a destructor.
-// Your destructor will be invoked from freeArena().
-template <typename T, typename... U> inline T *make(U &&... Args) {
- static SpecificAlloc<T> Alloc;
- return new (Alloc.Alloc.Allocate()) T(std::forward<U>(Args)...);
-}
-
-inline void freeArena() {
- for (SpecificAllocBase *Alloc : SpecificAllocBase::Instances)
- Alloc->reset();
- BAlloc.Reset();
-}
-}
-
-#endif
diff --git a/contrib/llvm/tools/lld/lib/Core/CMakeLists.txt b/contrib/llvm/tools/lld/lib/Core/CMakeLists.txt
index bbd9ad48b6df..cdd4e679ffa2 100644
--- a/contrib/llvm/tools/lld/lib/Core/CMakeLists.txt
+++ b/contrib/llvm/tools/lld/lib/Core/CMakeLists.txt
@@ -12,6 +12,7 @@ add_lld_library(lldCore
Resolver.cpp
SymbolTable.cpp
TargetOptionsCommandFlags.cpp
+ TaskGroup.cpp
Writer.cpp
ADDITIONAL_HEADER_DIRS
@@ -20,6 +21,9 @@ add_lld_library(lldCore
LINK_COMPONENTS
MC
Support
+
+ LINK_LIBS
+ ${LLVM_PTHREAD_LIB}
DEPENDS
${tablegen_deps}
diff --git a/contrib/llvm/tools/lld/lib/Core/TaskGroup.cpp b/contrib/llvm/tools/lld/lib/Core/TaskGroup.cpp
new file mode 100644
index 000000000000..d4de48ce3dc4
--- /dev/null
+++ b/contrib/llvm/tools/lld/lib/Core/TaskGroup.cpp
@@ -0,0 +1,141 @@
+//===- lld/Core/TaskGroup.cpp - Task Group --------------------------------===//
+//
+// The LLVM Linker
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "lld/Core/TaskGroup.h"
+#include "llvm/Config/llvm-config.h"
+
+#include <atomic>
+#include <stack>
+#include <thread>
+
+#if defined(_MSC_VER) && LLVM_ENABLE_THREADS
+#include <concrt.h>
+#include <ppl.h>
+#endif
+
+using namespace lld;
+
+namespace {
+
+/// \brief An abstract class that takes closures and runs them asynchronously.
+class Executor {
+public:
+ virtual ~Executor() = default;
+ virtual void add(std::function<void()> func) = 0;
+
+ static Executor *getDefaultExecutor();
+};
+
+#if !LLVM_ENABLE_THREADS
+class SyncExecutor : public Executor {
+public:
+ virtual void add(std::function<void()> F) { F(); }
+};
+
+Executor *Executor::getDefaultExecutor() {
+ static SyncExecutor Exec;
+ return &Exec;
+}
+
+#elif defined(_MSC_VER)
+/// \brief An Executor that runs tasks via ConcRT.
+class ConcRTExecutor : public Executor {
+ struct Taskish {
+ Taskish(std::function<void()> Task) : Task(Task) {}
+
+ std::function<void()> Task;
+
+ static void run(void *P) {
+ Taskish *Self = static_cast<Taskish *>(P);
+ Self->Task();
+ concurrency::Free(Self);
+ }
+ };
+
+public:
+ virtual void add(std::function<void()> F) {
+ Concurrency::CurrentScheduler::ScheduleTask(
+ Taskish::run, new (concurrency::Alloc(sizeof(Taskish))) Taskish(F));
+ }
+};
+
+Executor *Executor::getDefaultExecutor() {
+ static ConcRTExecutor exec;
+ return &exec;
+}
+
+#else
+/// \brief An implementation of an Executor that runs closures on a thread pool
+/// in FILO (last-in, first-out) order.
+class ThreadPoolExecutor : public Executor {
+public:
+ explicit ThreadPoolExecutor(
+ unsigned ThreadCount = std::thread::hardware_concurrency())
+ : Done(ThreadCount) {
+ // Spawn all but one of the threads in another thread as spawning threads
+ // can take a while.
+ std::thread([&, ThreadCount] {
+ for (size_t i = 1; i < ThreadCount; ++i) {
+ std::thread([=] { work(); }).detach();
+ }
+ work();
+ }).detach();
+ }
+
+ ~ThreadPoolExecutor() override {
+ std::unique_lock<std::mutex> Lock(Mutex);
+ Stop = true;
+ Lock.unlock();
+ Cond.notify_all();
+ // Wait for ~Latch.
+ }
+
+ void add(std::function<void()> F) override {
+ std::unique_lock<std::mutex> Lock(Mutex);
+ WorkStack.push(F);
+ Lock.unlock();
+ Cond.notify_one();
+ }
+
+private:
+ void work() {
+ while (true) {
+ std::unique_lock<std::mutex> Lock(Mutex);
+ Cond.wait(Lock, [&] { return Stop || !WorkStack.empty(); });
+ if (Stop)
+ break;
+ auto Task = WorkStack.top();
+ WorkStack.pop();
+ Lock.unlock();
+ Task();
+ }
+ Done.dec();
+ }
+
+ std::atomic<bool> Stop{false};
+ std::stack<std::function<void()>> WorkStack;
+ std::mutex Mutex;
+ std::condition_variable Cond;
+ Latch Done;
+};
+
+Executor *Executor::getDefaultExecutor() {
+ static ThreadPoolExecutor exec;
+ return &exec;
+}
+#endif
+}
+
+void TaskGroup::spawn(std::function<void()> f) {
+ _latch.inc();
+ Executor::getDefaultExecutor()->add([&, f] {
+ f();
+ _latch.dec();
+ });
+}
diff --git a/contrib/llvm/tools/lldb/include/lldb/API/SBAddress.h b/contrib/llvm/tools/lldb/include/lldb/API/SBAddress.h
index ddbe5a742786..9e697beffdd1 100644
--- a/contrib/llvm/tools/lldb/include/lldb/API/SBAddress.h
+++ b/contrib/llvm/tools/lldb/include/lldb/API/SBAddress.h
@@ -103,6 +103,8 @@ protected:
const lldb_private::Address *operator->() const;
+ friend bool operator==(const SBAddress &lhs, const SBAddress &rhs);
+
lldb_private::Address *get();
lldb_private::Address &ref();
@@ -117,6 +119,8 @@ private:
std::unique_ptr<lldb_private::Address> m_opaque_ap;
};
+bool operator==(const SBAddress &lhs, const SBAddress &rhs);
+
} // namespace lldb
#endif // LLDB_SBAddress_h_
diff --git a/contrib/llvm/tools/lldb/include/lldb/API/SBInstruction.h b/contrib/llvm/tools/lldb/include/lldb/API/SBInstruction.h
index 0fc12eb61cba..23daf1c56637 100644
--- a/contrib/llvm/tools/lldb/include/lldb/API/SBInstruction.h
+++ b/contrib/llvm/tools/lldb/include/lldb/API/SBInstruction.h
@@ -53,6 +53,8 @@ public:
bool HasDelaySlot();
+ bool CanSetBreakpoint();
+
void Print(FILE *out);
bool GetDescription(lldb::SBStream &description);
diff --git a/contrib/llvm/tools/lldb/include/lldb/API/SBInstructionList.h b/contrib/llvm/tools/lldb/include/lldb/API/SBInstructionList.h
index 29baef5790eb..0323a3c80c05 100644
--- a/contrib/llvm/tools/lldb/include/lldb/API/SBInstructionList.h
+++ b/contrib/llvm/tools/lldb/include/lldb/API/SBInstructionList.h
@@ -32,6 +32,15 @@ public:
lldb::SBInstruction GetInstructionAtIndex(uint32_t idx);
+ // ----------------------------------------------------------------------
+ // Returns the number of instructions between the start and end address.
+ // If canSetBreakpoint is true then the count will be the number of
+ // instructions on which a breakpoint can be set.
+ // ----------------------------------------------------------------------
+ size_t GetInstructionsCount(const SBAddress &start,
+ const SBAddress &end,
+ bool canSetBreakpoint = false);
+
void Clear();
void AppendInstruction(lldb::SBInstruction inst);
diff --git a/contrib/llvm/tools/lldb/include/lldb/Core/Disassembler.h b/contrib/llvm/tools/lldb/include/lldb/Core/Disassembler.h
index 929b668c092b..addc83ad5e9d 100644
--- a/contrib/llvm/tools/lldb/include/lldb/Core/Disassembler.h
+++ b/contrib/llvm/tools/lldb/include/lldb/Core/Disassembler.h
@@ -173,6 +173,8 @@ public:
virtual bool HasDelaySlot();
+ bool CanSetBreakpoint ();
+
virtual size_t Decode(const Disassembler &disassembler,
const DataExtractor &data,
lldb::offset_t data_offset) = 0;
diff --git a/contrib/llvm/tools/lldb/include/lldb/Expression/Expression.h b/contrib/llvm/tools/lldb/include/lldb/Expression/Expression.h
index f48a7992227d..860444e9c2c2 100644
--- a/contrib/llvm/tools/lldb/include/lldb/Expression/Expression.h
+++ b/contrib/llvm/tools/lldb/include/lldb/Expression/Expression.h
@@ -99,6 +99,16 @@ public:
//------------------------------------------------------------------
lldb::addr_t StartAddress() { return m_jit_start_addr; }
+ //------------------------------------------------------------------
+ /// Called to notify the expression that it is about to be executed.
+ //------------------------------------------------------------------
+ virtual void WillStartExecuting() {}
+
+ //------------------------------------------------------------------
+ /// Called to notify the expression that its execution has finished.
+ //------------------------------------------------------------------
+ virtual void DidFinishExecuting() {}
+
virtual ExpressionTypeSystemHelper *GetTypeSystemHelper() { return nullptr; }
protected:
diff --git a/contrib/llvm/tools/lldb/include/lldb/Host/MainLoop.h b/contrib/llvm/tools/lldb/include/lldb/Host/MainLoop.h
index 79370bf8461f..f5d906e98a7b 100644
--- a/contrib/llvm/tools/lldb/include/lldb/Host/MainLoop.h
+++ b/contrib/llvm/tools/lldb/include/lldb/Host/MainLoop.h
@@ -42,6 +42,7 @@ private:
public:
typedef std::unique_ptr<SignalHandle> SignalHandleUP;
+ MainLoop();
~MainLoop() override;
ReadHandleUP RegisterReadObject(const lldb::IOObjectSP &object_sp,
@@ -71,6 +72,9 @@ protected:
void UnregisterSignal(int signo);
private:
+ void ProcessReadObject(IOObject::WaitableHandle handle);
+ void ProcessSignal(int signo);
+
class SignalHandle {
public:
~SignalHandle() { m_mainloop.UnregisterSignal(m_signo); }
@@ -97,6 +101,9 @@ private:
llvm::DenseMap<IOObject::WaitableHandle, Callback> m_read_fds;
llvm::DenseMap<int, SignalInfo> m_signals;
+#if HAVE_SYS_EVENT_H
+ int m_kqueue;
+#endif
bool m_terminate_request : 1;
};
diff --git a/contrib/llvm/tools/lldb/include/lldb/Host/common/UDPSocket.h b/contrib/llvm/tools/lldb/include/lldb/Host/common/UDPSocket.h
index 38524fa8f62b..977ce151e4ff 100644
--- a/contrib/llvm/tools/lldb/include/lldb/Host/common/UDPSocket.h
+++ b/contrib/llvm/tools/lldb/include/lldb/Host/common/UDPSocket.h
@@ -21,15 +21,13 @@ public:
Socket *&socket);
private:
- UDPSocket(NativeSocket socket, const UDPSocket &listen_socket);
+ UDPSocket(NativeSocket socket);
size_t Send(const void *buf, const size_t num_bytes) override;
Error Connect(llvm::StringRef name) override;
Error Listen(llvm::StringRef name, int backlog) override;
Error Accept(Socket *&socket) override;
- Error CreateSocket();
-
SocketAddress m_sockaddr;
};
}
diff --git a/contrib/llvm/tools/lldb/include/lldb/Target/ThreadPlanCallFunction.h b/contrib/llvm/tools/lldb/include/lldb/Target/ThreadPlanCallFunction.h
index 3d43491af9af..1c75b0a3645c 100644
--- a/contrib/llvm/tools/lldb/include/lldb/Target/ThreadPlanCallFunction.h
+++ b/contrib/llvm/tools/lldb/include/lldb/Target/ThreadPlanCallFunction.h
@@ -117,7 +117,7 @@ protected:
lldb::addr_t &start_load_addr,
lldb::addr_t &function_load_addr);
- void DoTakedown(bool success);
+ virtual void DoTakedown(bool success);
void SetBreakpoints();
diff --git a/contrib/llvm/tools/lldb/include/lldb/Target/ThreadPlanCallUserExpression.h b/contrib/llvm/tools/lldb/include/lldb/Target/ThreadPlanCallUserExpression.h
index f1425b2f97e1..5fe80927ca21 100644
--- a/contrib/llvm/tools/lldb/include/lldb/Target/ThreadPlanCallUserExpression.h
+++ b/contrib/llvm/tools/lldb/include/lldb/Target/ThreadPlanCallUserExpression.h
@@ -35,6 +35,8 @@ public:
void GetDescription(Stream *s, lldb::DescriptionLevel level) override;
+ void DidPush() override;
+
void WillPop() override;
lldb::StopInfoSP GetRealStopInfo() override;
@@ -48,6 +50,7 @@ public:
}
protected:
+ void DoTakedown(bool success) override;
private:
lldb::UserExpressionSP
m_user_expression_sp; // This is currently just used to ensure the
diff --git a/contrib/llvm/tools/lldb/include/lldb/Utility/TaskPool.h b/contrib/llvm/tools/lldb/include/lldb/Utility/TaskPool.h
index fb936bbb739a..87b8824f9226 100644
--- a/contrib/llvm/tools/lldb/include/lldb/Utility/TaskPool.h
+++ b/contrib/llvm/tools/lldb/include/lldb/Utility/TaskPool.h
@@ -53,50 +53,6 @@ private:
static void AddTaskImpl(std::function<void()> &&task_fn);
};
-// Wrapper class around the global TaskPool implementation to make it possible
-// to create a set of
-// tasks and then wait for the tasks to be completed by the
-// WaitForNextCompletedTask call. This
-// class should be used when WaitForNextCompletedTask is needed because this
-// class add no other
-// extra functionality to the TaskPool class and it have a very minor
-// performance overhead.
-template <typename T> // The return type of the tasks what will be added to this
- // task runner
- class TaskRunner {
-public:
- // Add a task to the task runner what will also add the task to the global
- // TaskPool. The
- // function doesn't return the std::future for the task because it will be
- // supplied by the
- // WaitForNextCompletedTask after the task is completed.
- template <typename F, typename... Args> void AddTask(F &&f, Args &&... args);
-
- // Wait for the next task in this task runner to finish and then return the
- // std::future what
- // belongs to the finished task. If there is no task in this task runner
- // (neither pending nor
- // comleted) then this function will return an invalid future. Usually this
- // function should be
- // called in a loop processing the results of the tasks until it returns an
- // invalid std::future
- // what means that all task in this task runner is completed.
- std::future<T> WaitForNextCompletedTask();
-
- // Convenience method to wait for all task in this TaskRunner to finish. Do
- // NOT use this class
- // just because of this method. Use TaskPool instead and wait for each
- // std::future returned by
- // AddTask in a loop.
- void WaitForAllTasks();
-
-private:
- std::list<std::future<T>> m_ready;
- std::list<std::future<T>> m_pending;
- std::mutex m_mutex;
- std::condition_variable m_cv;
-};
-
template <typename F, typename... Args>
std::future<typename std::result_of<F(Args...)>::type>
TaskPool::AddTask(F &&f, Args &&... args) {
@@ -126,64 +82,10 @@ template <> struct TaskPool::RunTaskImpl<> {
static void Run() {}
};
-template <typename T>
-template <typename F, typename... Args>
-void TaskRunner<T>::AddTask(F &&f, Args &&... args) {
- std::unique_lock<std::mutex> lock(m_mutex);
- auto it = m_pending.emplace(m_pending.end());
- *it = std::move(TaskPool::AddTask(
- [this, it](F f, Args... args) {
- T &&r = f(std::forward<Args>(args)...);
-
- std::unique_lock<std::mutex> lock(this->m_mutex);
- this->m_ready.splice(this->m_ready.end(), this->m_pending, it);
- lock.unlock();
-
- this->m_cv.notify_one();
- return r;
- },
- std::forward<F>(f), std::forward<Args>(args)...));
-}
-
-template <>
-template <typename F, typename... Args>
-void TaskRunner<void>::AddTask(F &&f, Args &&... args) {
- std::unique_lock<std::mutex> lock(m_mutex);
- auto it = m_pending.emplace(m_pending.end());
- *it = std::move(TaskPool::AddTask(
- [this, it](F f, Args... args) {
- f(std::forward<Args>(args)...);
-
- std::unique_lock<std::mutex> lock(this->m_mutex);
- this->m_ready.emplace_back(std::move(*it));
- this->m_pending.erase(it);
- lock.unlock();
-
- this->m_cv.notify_one();
- },
- std::forward<F>(f), std::forward<Args>(args)...));
-}
-
-template <typename T> std::future<T> TaskRunner<T>::WaitForNextCompletedTask() {
- std::unique_lock<std::mutex> lock(m_mutex);
- if (m_ready.empty() && m_pending.empty())
- return std::future<T>(); // No more tasks
-
- if (m_ready.empty())
- m_cv.wait(lock, [this]() { return !this->m_ready.empty(); });
-
- std::future<T> res = std::move(m_ready.front());
- m_ready.pop_front();
-
- lock.unlock();
- res.wait();
-
- return std::move(res);
-}
-
-template <typename T> void TaskRunner<T>::WaitForAllTasks() {
- while (WaitForNextCompletedTask().valid())
- ;
-}
+// Run 'func' on every value from begin .. end-1. Each worker will grab
+// 'batch_size' numbers at a time to work on, so for very fast functions, batch
+// should be large enough to avoid too much cache line contention.
+void TaskMapOverInt(size_t begin, size_t end,
+ std::function<void(size_t)> const &func);
#endif // #ifndef utility_TaskPool_h_
diff --git a/contrib/llvm/tools/lldb/source/API/SBAddress.cpp b/contrib/llvm/tools/lldb/source/API/SBAddress.cpp
index b452ce327ab7..a3493d7c743f 100644
--- a/contrib/llvm/tools/lldb/source/API/SBAddress.cpp
+++ b/contrib/llvm/tools/lldb/source/API/SBAddress.cpp
@@ -55,6 +55,12 @@ const SBAddress &SBAddress::operator=(const SBAddress &rhs) {
return *this;
}
+bool lldb::operator==(const SBAddress &lhs, const SBAddress &rhs) {
+ if (lhs.IsValid() && rhs.IsValid())
+ return lhs.ref() == rhs.ref();
+ return false;
+}
+
bool SBAddress::IsValid() const {
return m_opaque_ap.get() != NULL && m_opaque_ap->IsValid();
}
diff --git a/contrib/llvm/tools/lldb/source/API/SBInstruction.cpp b/contrib/llvm/tools/lldb/source/API/SBInstruction.cpp
index c47307c733a8..8b7deb7011be 100644
--- a/contrib/llvm/tools/lldb/source/API/SBInstruction.cpp
+++ b/contrib/llvm/tools/lldb/source/API/SBInstruction.cpp
@@ -176,6 +176,13 @@ bool SBInstruction::HasDelaySlot() {
return false;
}
+bool SBInstruction::CanSetBreakpoint () {
+ lldb::InstructionSP inst_sp(GetOpaque());
+ if (inst_sp)
+ return inst_sp->CanSetBreakpoint();
+ return false;
+}
+
lldb::InstructionSP SBInstruction::GetOpaque() {
if (m_opaque_sp)
return m_opaque_sp->GetSP();
diff --git a/contrib/llvm/tools/lldb/source/API/SBInstructionList.cpp b/contrib/llvm/tools/lldb/source/API/SBInstructionList.cpp
index 04c37f50c2d7..3edb9eae98c1 100644
--- a/contrib/llvm/tools/lldb/source/API/SBInstructionList.cpp
+++ b/contrib/llvm/tools/lldb/source/API/SBInstructionList.cpp
@@ -9,6 +9,7 @@
#include "lldb/API/SBInstructionList.h"
#include "lldb/API/SBInstruction.h"
+#include "lldb/API/SBAddress.h"
#include "lldb/API/SBStream.h"
#include "lldb/Core/Disassembler.h"
#include "lldb/Core/Module.h"
@@ -49,6 +50,31 @@ SBInstruction SBInstructionList::GetInstructionAtIndex(uint32_t idx) {
return inst;
}
+size_t SBInstructionList::GetInstructionsCount(const SBAddress &start,
+ const SBAddress &end,
+ bool canSetBreakpoint) {
+ size_t num_instructions = GetSize();
+ size_t i = 0;
+ SBAddress addr;
+ size_t lower_index = 0;
+ size_t upper_index = 0;
+ size_t instructions_to_skip = 0;
+ for (i = 0; i < num_instructions; ++i) {
+ addr = GetInstructionAtIndex(i).GetAddress();
+ if (start == addr)
+ lower_index = i;
+ if (end == addr)
+ upper_index = i;
+ }
+ if (canSetBreakpoint)
+ for (i = lower_index; i <= upper_index; ++i) {
+ SBInstruction insn = GetInstructionAtIndex(i);
+ if (!insn.CanSetBreakpoint())
+ ++instructions_to_skip;
+ }
+ return upper_index - lower_index - instructions_to_skip;
+}
+
void SBInstructionList::Clear() { m_opaque_sp.reset(); }
void SBInstructionList::AppendInstruction(SBInstruction insn) {}
diff --git a/contrib/llvm/tools/lldb/source/API/SBProcess.cpp b/contrib/llvm/tools/lldb/source/API/SBProcess.cpp
index 5614cb468a69..0348113a9873 100644
--- a/contrib/llvm/tools/lldb/source/API/SBProcess.cpp
+++ b/contrib/llvm/tools/lldb/source/API/SBProcess.cpp
@@ -1157,22 +1157,34 @@ uint32_t SBProcess::LoadImage(lldb::SBFileSpec &sb_remote_image_spec,
uint32_t SBProcess::LoadImage(const lldb::SBFileSpec &sb_local_image_spec,
const lldb::SBFileSpec &sb_remote_image_spec,
lldb::SBError &sb_error) {
+ Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_API));
ProcessSP process_sp(GetSP());
if (process_sp) {
Process::StopLocker stop_locker;
if (stop_locker.TryLock(&process_sp->GetRunLock())) {
+ if (log)
+ log->Printf("SBProcess(%p)::LoadImage() => calling Platform::LoadImage"
+ "for: %s",
+ static_cast<void *>(process_sp.get()),
+ sb_local_image_spec.GetFilename());
+
std::lock_guard<std::recursive_mutex> guard(
- process_sp->GetTarget().GetAPIMutex());
+ process_sp->GetTarget().GetAPIMutex());
PlatformSP platform_sp = process_sp->GetTarget().GetPlatform();
return platform_sp->LoadImage(process_sp.get(), *sb_local_image_spec,
*sb_remote_image_spec, sb_error.ref());
} else {
- Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_API));
if (log)
log->Printf("SBProcess(%p)::LoadImage() => error: process is running",
static_cast<void *>(process_sp.get()));
sb_error.SetErrorString("process is running");
}
+ } else {
+ if (log)
+ log->Printf("SBProcess(%p)::LoadImage() => error: called with invalid"
+ " process",
+ static_cast<void *>(process_sp.get()));
+ sb_error.SetErrorString("process is invalid");
}
return LLDB_INVALID_IMAGE_TOKEN;
}
diff --git a/contrib/llvm/tools/lldb/source/Core/Disassembler.cpp b/contrib/llvm/tools/lldb/source/Core/Disassembler.cpp
index 3880bfd16ecc..51d93d9acdbb 100644
--- a/contrib/llvm/tools/lldb/source/Core/Disassembler.cpp
+++ b/contrib/llvm/tools/lldb/source/Core/Disassembler.cpp
@@ -759,6 +759,10 @@ bool Instruction::DumpEmulation(const ArchSpec &arch) {
return false;
}
+bool Instruction::CanSetBreakpoint () {
+ return !HasDelaySlot();
+}
+
bool Instruction::HasDelaySlot() {
// Default is false.
return false;
diff --git a/contrib/llvm/tools/lldb/source/Host/common/Editline.cpp b/contrib/llvm/tools/lldb/source/Host/common/Editline.cpp
index b157cdb7c110..851287e76331 100644
--- a/contrib/llvm/tools/lldb/source/Host/common/Editline.cpp
+++ b/contrib/llvm/tools/lldb/source/Host/common/Editline.cpp
@@ -367,7 +367,7 @@ void Editline::MoveCursor(CursorLocation from, CursorLocation to) {
if (to == CursorLocation::EditingCursor) {
toColumn =
editline_cursor_position - (editline_cursor_row * m_terminal_width) + 1;
- } else if (to == CursorLocation::BlockEnd) {
+ } else if (to == CursorLocation::BlockEnd && !m_input_lines.empty()) {
toColumn =
((m_input_lines[m_input_lines.size() - 1].length() + GetPromptWidth()) %
80) +
diff --git a/contrib/llvm/tools/lldb/source/Host/common/MainLoop.cpp b/contrib/llvm/tools/lldb/source/Host/common/MainLoop.cpp
index 8a9d4f020d5f..abd52f7f46fb 100644
--- a/contrib/llvm/tools/lldb/source/Host/common/MainLoop.cpp
+++ b/contrib/llvm/tools/lldb/source/Host/common/MainLoop.cpp
@@ -18,6 +18,11 @@
#include <vector>
#include <time.h>
+// Multiplexing is implemented using kqueue on systems that support it (BSD
+// variants including OSX). On linux we use ppoll, while android uses pselect
+// (ppoll is present but not implemented properly). On windows we use WSApoll
+// (which does not support signals).
+
#if HAVE_SYS_EVENT_H
#include <sys/event.h>
#elif defined(LLVM_ON_WIN32)
@@ -65,92 +70,72 @@ static void SignalHandler(int signo, siginfo_t *info, void *) {
class MainLoop::RunImpl {
public:
- // TODO: Use llvm::Expected<T>
- static std::unique_ptr<RunImpl> Create(MainLoop &loop, Error &error);
- ~RunImpl();
+ RunImpl(MainLoop &loop);
+ ~RunImpl() = default;
Error Poll();
-
- template <typename F> void ForEachReadFD(F &&f);
- template <typename F> void ForEachSignal(F &&f);
+ void ProcessEvents();
private:
MainLoop &loop;
#if HAVE_SYS_EVENT_H
- int queue_id;
std::vector<struct kevent> in_events;
struct kevent out_events[4];
int num_events = -1;
- RunImpl(MainLoop &loop, int queue_id) : loop(loop), queue_id(queue_id) {
- in_events.reserve(loop.m_read_fds.size() + loop.m_signals.size());
- }
#else
- std::vector<int> signals;
#ifdef FORCE_PSELECT
fd_set read_fd_set;
#else
std::vector<struct pollfd> read_fds;
#endif
- RunImpl(MainLoop &loop) : loop(loop) {
- signals.reserve(loop.m_signals.size());
- }
-
sigset_t get_sigmask();
#endif
};
#if HAVE_SYS_EVENT_H
-MainLoop::RunImpl::~RunImpl() {
- int r = close(queue_id);
- assert(r == 0);
- (void)r;
-}
-std::unique_ptr<MainLoop::RunImpl> MainLoop::RunImpl::Create(MainLoop &loop, Error &error)
-{
- error.Clear();
- int queue_id = kqueue();
- if(queue_id < 0) {
- error = Error(errno, eErrorTypePOSIX);
- return nullptr;
- }
- return std::unique_ptr<RunImpl>(new RunImpl(loop, queue_id));
+MainLoop::RunImpl::RunImpl(MainLoop &loop) : loop(loop) {
+ in_events.reserve(loop.m_read_fds.size());
}
Error MainLoop::RunImpl::Poll() {
- in_events.resize(loop.m_read_fds.size() + loop.m_signals.size());
+ in_events.resize(loop.m_read_fds.size());
unsigned i = 0;
for (auto &fd : loop.m_read_fds)
EV_SET(&in_events[i++], fd.first, EVFILT_READ, EV_ADD, 0, 0, 0);
- for (const auto &sig : loop.m_signals)
- EV_SET(&in_events[i++], sig.first, EVFILT_SIGNAL, EV_ADD, 0, 0, 0);
-
- num_events = kevent(queue_id, in_events.data(), in_events.size(), out_events,
- llvm::array_lengthof(out_events), nullptr);
+ num_events = kevent(loop.m_kqueue, in_events.data(), in_events.size(),
+ out_events, llvm::array_lengthof(out_events), nullptr);
if (num_events < 0)
return Error("kevent() failed with error %d\n", num_events);
return Error();
}
-template <typename F> void MainLoop::RunImpl::ForEachReadFD(F &&f) {
+void MainLoop::RunImpl::ProcessEvents() {
assert(num_events >= 0);
for (int i = 0; i < num_events; ++i) {
- f(out_events[i].ident);
if (loop.m_terminate_request)
return;
+ switch (out_events[i].filter) {
+ case EVFILT_READ:
+ loop.ProcessReadObject(out_events[i].ident);
+ break;
+ case EVFILT_SIGNAL:
+ loop.ProcessSignal(out_events[i].ident);
+ break;
+ default:
+ llvm_unreachable("Unknown event");
+ }
}
}
-template <typename F> void MainLoop::RunImpl::ForEachSignal(F && f) {}
#else
-MainLoop::RunImpl::~RunImpl() {}
-std::unique_ptr<MainLoop::RunImpl> MainLoop::RunImpl::Create(MainLoop &loop, Error &error)
-{
- error.Clear();
- return std::unique_ptr<RunImpl>(new RunImpl(loop));
+MainLoop::RunImpl::RunImpl(MainLoop &loop) : loop(loop) {
+#ifndef FORCE_PSELECT
+ read_fds.reserve(loop.m_read_fds.size());
+#endif
}
sigset_t MainLoop::RunImpl::get_sigmask() {
@@ -162,18 +147,14 @@ sigset_t MainLoop::RunImpl::get_sigmask() {
assert(ret == 0);
(void) ret;
- for (const auto &sig : loop.m_signals) {
- signals.push_back(sig.first);
+ for (const auto &sig : loop.m_signals)
sigdelset(&sigmask, sig.first);
- }
return sigmask;
#endif
}
#ifdef FORCE_PSELECT
Error MainLoop::RunImpl::Poll() {
- signals.clear();
-
FD_ZERO(&read_fd_set);
int nfds = 0;
for (const auto &fd : loop.m_read_fds) {
@@ -188,20 +169,8 @@ Error MainLoop::RunImpl::Poll() {
return Error();
}
-
-template <typename F> void MainLoop::RunImpl::ForEachReadFD(F &&f) {
- for (const auto &fd : loop.m_read_fds) {
- if(!FD_ISSET(fd.first, &read_fd_set))
- continue;
-
- f(fd.first);
- if (loop.m_terminate_request)
- return;
- }
-}
#else
Error MainLoop::RunImpl::Poll() {
- signals.clear();
read_fds.clear();
sigset_t sigmask = get_sigmask();
@@ -220,33 +189,47 @@ Error MainLoop::RunImpl::Poll() {
return Error();
}
+#endif
-template <typename F> void MainLoop::RunImpl::ForEachReadFD(F &&f) {
+void MainLoop::RunImpl::ProcessEvents() {
+#ifdef FORCE_PSELECT
+ for (const auto &fd : loop.m_read_fds) {
+ if (!FD_ISSET(fd.first, &read_fd_set))
+ continue;
+ IOObject::WaitableHandle handle = fd.first;
+#else
for (const auto &fd : read_fds) {
if ((fd.revents & POLLIN) == 0)
continue;
-
- f(fd.fd);
+ IOObject::WaitableHandle handle = fd.fd;
+#endif
if (loop.m_terminate_request)
return;
- }
-}
-#endif
-template <typename F> void MainLoop::RunImpl::ForEachSignal(F &&f) {
- for (int sig : signals) {
- if (g_signal_flags[sig] == 0)
- continue; // No signal
- g_signal_flags[sig] = 0;
- f(sig);
+ loop.ProcessReadObject(handle);
+ }
+ for (const auto &entry : loop.m_signals) {
if (loop.m_terminate_request)
return;
+ if (g_signal_flags[entry.first] == 0)
+ continue; // No signal
+ g_signal_flags[entry.first] = 0;
+ loop.ProcessSignal(entry.first);
}
}
#endif
+MainLoop::MainLoop() {
+#if HAVE_SYS_EVENT_H
+ m_kqueue = kqueue();
+ assert(m_kqueue >= 0);
+#endif
+}
MainLoop::~MainLoop() {
+#if HAVE_SYS_EVENT_H
+ close(m_kqueue);
+#endif
assert(m_read_fds.size() == 0);
assert(m_signals.size() == 0);
}
@@ -298,24 +281,30 @@ MainLoop::RegisterSignal(int signo, const Callback &callback,
new_action.sa_flags = SA_SIGINFO;
sigemptyset(&new_action.sa_mask);
sigaddset(&new_action.sa_mask, signo);
-
sigset_t old_set;
- if (int ret = pthread_sigmask(SIG_BLOCK, &new_action.sa_mask, &old_set)) {
- error.SetErrorStringWithFormat("pthread_sigmask failed with error %d\n",
- ret);
- return nullptr;
- }
- info.was_blocked = sigismember(&old_set, signo);
- if (sigaction(signo, &new_action, &info.old_action) == -1) {
- error.SetErrorToErrno();
- if (!info.was_blocked)
- pthread_sigmask(SIG_UNBLOCK, &new_action.sa_mask, nullptr);
- return nullptr;
- }
+ g_signal_flags[signo] = 0;
+
+ // Even if using kqueue, the signal handler will still be invoked, so it's
+  // important to replace it with our "benign" handler.
+ int ret = sigaction(signo, &new_action, &info.old_action);
+ assert(ret == 0 && "sigaction failed");
+#if HAVE_SYS_EVENT_H
+ struct kevent ev;
+ EV_SET(&ev, signo, EVFILT_SIGNAL, EV_ADD, 0, 0, 0);
+ ret = kevent(m_kqueue, &ev, 1, nullptr, 0, nullptr);
+ assert(ret == 0);
+#endif
+
+  // If we're using kqueue, the signal needs to be unblocked in order to receive
+ // it. If using pselect/ppoll, we need to block it, and later unblock it as a
+ // part of the system call.
+ ret = pthread_sigmask(HAVE_SYS_EVENT_H ? SIG_UNBLOCK : SIG_BLOCK,
+ &new_action.sa_mask, &old_set);
+ assert(ret == 0 && "pthread_sigmask failed");
+ info.was_blocked = sigismember(&old_set, signo);
m_signals.insert({signo, info});
- g_signal_flags[signo] = 0;
return SignalHandleUP(new SignalHandle(*this, signo));
#endif
@@ -331,7 +320,6 @@ void MainLoop::UnregisterSignal(int signo) {
#if SIGNAL_POLLING_UNSUPPORTED
Error("Signal polling is not supported on this platform.");
#else
- // We undo the actions of RegisterSignal on a best-effort basis.
auto it = m_signals.find(signo);
assert(it != m_signals.end());
@@ -340,8 +328,17 @@ void MainLoop::UnregisterSignal(int signo) {
sigset_t set;
sigemptyset(&set);
sigaddset(&set, signo);
- pthread_sigmask(it->second.was_blocked ? SIG_BLOCK : SIG_UNBLOCK, &set,
- nullptr);
+ int ret = pthread_sigmask(it->second.was_blocked ? SIG_BLOCK : SIG_UNBLOCK,
+ &set, nullptr);
+ assert(ret == 0);
+ (void)ret;
+
+#if HAVE_SYS_EVENT_H
+ struct kevent ev;
+ EV_SET(&ev, signo, EVFILT_SIGNAL, EV_DELETE, 0, 0, 0);
+ ret = kevent(m_kqueue, &ev, 1, nullptr, 0, nullptr);
+ assert(ret == 0);
+#endif
m_signals.erase(it);
#endif
@@ -351,32 +348,31 @@ Error MainLoop::Run() {
m_terminate_request = false;
Error error;
- auto impl = RunImpl::Create(*this, error);
- if (!impl)
- return error;
+ RunImpl impl(*this);
// run until termination or until we run out of things to listen to
while (!m_terminate_request && (!m_read_fds.empty() || !m_signals.empty())) {
- error = impl->Poll();
+ error = impl.Poll();
if (error.Fail())
return error;
- impl->ForEachSignal([&](int sig) {
- auto it = m_signals.find(sig);
- if (it != m_signals.end())
- it->second.callback(*this); // Do the work
- });
- if (m_terminate_request)
- return Error();
+ impl.ProcessEvents();
- impl->ForEachReadFD([&](int fd) {
- auto it = m_read_fds.find(fd);
- if (it != m_read_fds.end())
- it->second(*this); // Do the work
- });
if (m_terminate_request)
return Error();
}
return Error();
}
+
+void MainLoop::ProcessSignal(int signo) {
+ auto it = m_signals.find(signo);
+ if (it != m_signals.end())
+ it->second.callback(*this); // Do the work
+}
+
+void MainLoop::ProcessReadObject(IOObject::WaitableHandle handle) {
+ auto it = m_read_fds.find(handle);
+ if (it != m_read_fds.end())
+ it->second(*this); // Do the work
+}
diff --git a/contrib/llvm/tools/lldb/source/Host/common/UDPSocket.cpp b/contrib/llvm/tools/lldb/source/Host/common/UDPSocket.cpp
index a32657aab0a6..ce8d90891b2b 100644
--- a/contrib/llvm/tools/lldb/source/Host/common/UDPSocket.cpp
+++ b/contrib/llvm/tools/lldb/source/Host/common/UDPSocket.cpp
@@ -28,31 +28,41 @@ const int kDomain = AF_INET;
const int kType = SOCK_DGRAM;
static const char *g_not_supported_error = "Not supported";
-} // namespace
-
-UDPSocket::UDPSocket(bool should_close, bool child_processes_inherit)
- : Socket(ProtocolUdp, should_close, child_processes_inherit) {}
+}
-UDPSocket::UDPSocket(NativeSocket socket, const UDPSocket &listen_socket)
- : Socket(ProtocolUdp, listen_socket.m_should_close_fd,
- listen_socket.m_child_processes_inherit) {
+UDPSocket::UDPSocket(NativeSocket socket) : Socket(ProtocolUdp, true, true) {
m_socket = socket;
}
+UDPSocket::UDPSocket(bool should_close, bool child_processes_inherit)
+ : Socket(ProtocolUdp, should_close, child_processes_inherit) {}
+
size_t UDPSocket::Send(const void *buf, const size_t num_bytes) {
return ::sendto(m_socket, static_cast<const char *>(buf), num_bytes, 0,
m_sockaddr, m_sockaddr.GetLength());
}
Error UDPSocket::Connect(llvm::StringRef name) {
+ return Error("%s", g_not_supported_error);
+}
+
+Error UDPSocket::Listen(llvm::StringRef name, int backlog) {
+ return Error("%s", g_not_supported_error);
+}
+
+Error UDPSocket::Accept(Socket *&socket) {
+ return Error("%s", g_not_supported_error);
+}
+
+Error UDPSocket::Connect(llvm::StringRef name, bool child_processes_inherit,
+ Socket *&socket) {
+ std::unique_ptr<UDPSocket> final_socket;
+
Log *log(lldb_private::GetLogIfAnyCategoriesSet(LIBLLDB_LOG_CONNECTION));
if (log)
log->Printf("UDPSocket::%s (host/port = %s)", __FUNCTION__, name.data());
Error error;
- if (error.Fail())
- return error;
-
std::string host_str;
std::string port_str;
int32_t port = INT32_MIN;
@@ -84,11 +94,12 @@ Error UDPSocket::Connect(llvm::StringRef name) {
for (struct addrinfo *service_info_ptr = service_info_list;
service_info_ptr != nullptr;
service_info_ptr = service_info_ptr->ai_next) {
- m_socket = Socket::CreateSocket(
+ auto send_fd = CreateSocket(
service_info_ptr->ai_family, service_info_ptr->ai_socktype,
- service_info_ptr->ai_protocol, m_child_processes_inherit, error);
+ service_info_ptr->ai_protocol, child_processes_inherit, error);
if (error.Success()) {
- m_sockaddr = service_info_ptr;
+ final_socket.reset(new UDPSocket(send_fd));
+ final_socket->m_sockaddr = service_info_ptr;
break;
} else
continue;
@@ -96,17 +107,16 @@ Error UDPSocket::Connect(llvm::StringRef name) {
::freeaddrinfo(service_info_list);
- if (IsValid())
+ if (!final_socket)
return error;
SocketAddress bind_addr;
// Only bind to the loopback address if we are expecting a connection from
// localhost to avoid any firewall issues.
- const bool bind_addr_success =
- (host_str == "127.0.0.1" || host_str == "localhost")
- ? bind_addr.SetToLocalhost(kDomain, port)
- : bind_addr.SetToAnyAddress(kDomain, port);
+ const bool bind_addr_success = (host_str == "127.0.0.1" || host_str == "localhost")
+ ? bind_addr.SetToLocalhost(kDomain, port)
+ : bind_addr.SetToAnyAddress(kDomain, port);
if (!bind_addr_success) {
error.SetErrorString("Failed to get hostspec to bind for");
@@ -115,37 +125,13 @@ Error UDPSocket::Connect(llvm::StringRef name) {
bind_addr.SetPort(0); // Let the source port # be determined dynamically
- err = ::bind(m_socket, bind_addr, bind_addr.GetLength());
+ err = ::bind(final_socket->GetNativeSocket(), bind_addr, bind_addr.GetLength());
- error.Clear();
- return error;
-}
+ struct sockaddr_in source_info;
+ socklen_t address_len = sizeof (struct sockaddr_in);
+ err = ::getsockname(final_socket->GetNativeSocket(), (struct sockaddr *) &source_info, &address_len);
-Error UDPSocket::Listen(llvm::StringRef name, int backlog) {
- return Error("%s", g_not_supported_error);
-}
-
-Error UDPSocket::Accept(Socket *&socket) {
- return Error("%s", g_not_supported_error);
-}
-
-Error UDPSocket::CreateSocket() {
- Error error;
- if (IsValid())
- error = Close();
- if (error.Fail())
- return error;
- m_socket =
- Socket::CreateSocket(kDomain, kType, 0, m_child_processes_inherit, error);
- return error;
-}
-
-Error UDPSocket::Connect(llvm::StringRef name, bool child_processes_inherit,
- Socket *&socket) {
- std::unique_ptr<UDPSocket> final_socket(
- new UDPSocket(true, child_processes_inherit));
- Error error = final_socket->Connect(name);
- if (!error.Fail())
- socket = final_socket.release();
+ socket = final_socket.release();
+ error.Clear();
return error;
}
diff --git a/contrib/llvm/tools/lldb/source/Plugins/ABI/SysV-arm64/ABISysV_arm64.cpp b/contrib/llvm/tools/lldb/source/Plugins/ABI/SysV-arm64/ABISysV_arm64.cpp
index 04df0065d7bc..65cbd271e979 100644
--- a/contrib/llvm/tools/lldb/source/Plugins/ABI/SysV-arm64/ABISysV_arm64.cpp
+++ b/contrib/llvm/tools/lldb/source/Plugins/ABI/SysV-arm64/ABISysV_arm64.cpp
@@ -2362,32 +2362,30 @@ ValueObjectSP ABISysV_arm64::GetReturnValueObjectImpl(
if (success)
return_valobj_sp = ValueObjectConstResult::Create(
thread.GetStackFrameAtIndex(0).get(), value, ConstString(""));
- } else if (type_flags & eTypeIsVector) {
+ } else if (type_flags & eTypeIsVector && byte_size <= 16) {
if (byte_size > 0) {
const RegisterInfo *v0_info = reg_ctx->GetRegisterInfoByName("v0", 0);
if (v0_info) {
- if (byte_size <= v0_info->byte_size) {
- std::unique_ptr<DataBufferHeap> heap_data_ap(
- new DataBufferHeap(byte_size, 0));
- const ByteOrder byte_order = exe_ctx.GetProcessRef().GetByteOrder();
- RegisterValue reg_value;
- if (reg_ctx->ReadRegister(v0_info, reg_value)) {
- Error error;
- if (reg_value.GetAsMemoryData(v0_info, heap_data_ap->GetBytes(),
- heap_data_ap->GetByteSize(),
- byte_order, error)) {
- DataExtractor data(DataBufferSP(heap_data_ap.release()),
- byte_order,
- exe_ctx.GetProcessRef().GetAddressByteSize());
- return_valobj_sp = ValueObjectConstResult::Create(
- &thread, return_compiler_type, ConstString(""), data);
- }
+ std::unique_ptr<DataBufferHeap> heap_data_ap(
+ new DataBufferHeap(byte_size, 0));
+ const ByteOrder byte_order = exe_ctx.GetProcessRef().GetByteOrder();
+ RegisterValue reg_value;
+ if (reg_ctx->ReadRegister(v0_info, reg_value)) {
+ Error error;
+ if (reg_value.GetAsMemoryData(v0_info, heap_data_ap->GetBytes(),
+ heap_data_ap->GetByteSize(), byte_order,
+ error)) {
+ DataExtractor data(DataBufferSP(heap_data_ap.release()), byte_order,
+ exe_ctx.GetProcessRef().GetAddressByteSize());
+ return_valobj_sp = ValueObjectConstResult::Create(
+ &thread, return_compiler_type, ConstString(""), data);
}
}
}
}
- } else if (type_flags & eTypeIsStructUnion || type_flags & eTypeIsClass) {
+ } else if (type_flags & eTypeIsStructUnion || type_flags & eTypeIsClass ||
+ (type_flags & eTypeIsVector && byte_size > 16)) {
DataExtractor data;
uint32_t NGRN = 0; // Search ABI docs for NGRN
diff --git a/contrib/llvm/tools/lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARF.cpp b/contrib/llvm/tools/lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARF.cpp
index 8c2fc3d3aa42..ad6af8dfebd5 100644
--- a/contrib/llvm/tools/lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARF.cpp
+++ b/contrib/llvm/tools/lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARF.cpp
@@ -1946,7 +1946,9 @@ void SymbolFileDWARF::Index() {
std::vector<NameToDIE> type_index(num_compile_units);
std::vector<NameToDIE> namespace_index(num_compile_units);
- std::vector<bool> clear_cu_dies(num_compile_units, false);
+ // std::vector<bool> might be implemented using bit test-and-set, so use
+ // uint8_t instead.
+ std::vector<uint8_t> clear_cu_dies(num_compile_units, false);
auto parser_fn = [debug_info, &function_basename_index,
&function_fullname_index, &function_method_index,
&function_selector_index, &objc_class_selectors_index,
@@ -1963,22 +1965,18 @@ void SymbolFileDWARF::Index() {
return cu_idx;
};
- auto extract_fn = [debug_info](uint32_t cu_idx) {
+ auto extract_fn = [debug_info, &clear_cu_dies](uint32_t cu_idx) {
DWARFCompileUnit *dwarf_cu = debug_info->GetCompileUnitAtIndex(cu_idx);
if (dwarf_cu) {
// dwarf_cu->ExtractDIEsIfNeeded(false) will return zero if the
// DIEs for a compile unit have already been parsed.
- return std::make_pair(cu_idx, dwarf_cu->ExtractDIEsIfNeeded(false) > 1);
+ if (dwarf_cu->ExtractDIEsIfNeeded(false) > 1)
+ clear_cu_dies[cu_idx] = true;
}
- return std::make_pair(cu_idx, false);
};
// Create a task runner that extracts dies for each DWARF compile unit in a
// separate thread
- TaskRunner<std::pair<uint32_t, bool>> task_runner_extract;
- for (uint32_t cu_idx = 0; cu_idx < num_compile_units; ++cu_idx)
- task_runner_extract.AddTask(extract_fn, cu_idx);
-
//----------------------------------------------------------------------
// First figure out which compile units didn't have their DIEs already
// parsed and remember this. If no DIEs were parsed prior to this index
@@ -1988,48 +1986,37 @@ void SymbolFileDWARF::Index() {
// a DIE in one compile unit refers to another and the indexes accesses
// those DIEs.
//----------------------------------------------------------------------
- while (true) {
- auto f = task_runner_extract.WaitForNextCompletedTask();
- if (!f.valid())
- break;
- unsigned cu_idx;
- bool clear;
- std::tie(cu_idx, clear) = f.get();
- clear_cu_dies[cu_idx] = clear;
- }
+ TaskMapOverInt(0, num_compile_units, extract_fn);
// Now create a task runner that can index each DWARF compile unit in a
// separate
// thread so we can index quickly.
- TaskRunner<uint32_t> task_runner;
- for (uint32_t cu_idx = 0; cu_idx < num_compile_units; ++cu_idx)
- task_runner.AddTask(parser_fn, cu_idx);
+ TaskMapOverInt(0, num_compile_units, parser_fn);
- while (true) {
- std::future<uint32_t> f = task_runner.WaitForNextCompletedTask();
- if (!f.valid())
- break;
- uint32_t cu_idx = f.get();
-
- m_function_basename_index.Append(function_basename_index[cu_idx]);
- m_function_fullname_index.Append(function_fullname_index[cu_idx]);
- m_function_method_index.Append(function_method_index[cu_idx]);
- m_function_selector_index.Append(function_selector_index[cu_idx]);
- m_objc_class_selectors_index.Append(objc_class_selectors_index[cu_idx]);
- m_global_index.Append(global_index[cu_idx]);
- m_type_index.Append(type_index[cu_idx]);
- m_namespace_index.Append(namespace_index[cu_idx]);
- }
+ auto finalize_fn = [](NameToDIE &index, std::vector<NameToDIE> &srcs) {
+ for (auto &src : srcs)
+ index.Append(src);
+ index.Finalize();
+ };
- TaskPool::RunTasks([&]() { m_function_basename_index.Finalize(); },
- [&]() { m_function_fullname_index.Finalize(); },
- [&]() { m_function_method_index.Finalize(); },
- [&]() { m_function_selector_index.Finalize(); },
- [&]() { m_objc_class_selectors_index.Finalize(); },
- [&]() { m_global_index.Finalize(); },
- [&]() { m_type_index.Finalize(); },
- [&]() { m_namespace_index.Finalize(); });
+ TaskPool::RunTasks(
+ [&]() {
+ finalize_fn(m_function_basename_index, function_basename_index);
+ },
+ [&]() {
+ finalize_fn(m_function_fullname_index, function_fullname_index);
+ },
+ [&]() { finalize_fn(m_function_method_index, function_method_index); },
+ [&]() {
+ finalize_fn(m_function_selector_index, function_selector_index);
+ },
+ [&]() {
+ finalize_fn(m_objc_class_selectors_index, objc_class_selectors_index);
+ },
+ [&]() { finalize_fn(m_global_index, global_index); },
+ [&]() { finalize_fn(m_type_index, type_index); },
+ [&]() { finalize_fn(m_namespace_index, namespace_index); });
//----------------------------------------------------------------------
// Keep memory down by clearing DIEs for any compile units if indexing
diff --git a/contrib/llvm/tools/lldb/source/Target/ThreadPlanCallUserExpression.cpp b/contrib/llvm/tools/lldb/source/Target/ThreadPlanCallUserExpression.cpp
index 679040d09a02..15cbd0baa9a6 100644
--- a/contrib/llvm/tools/lldb/source/Target/ThreadPlanCallUserExpression.cpp
+++ b/contrib/llvm/tools/lldb/source/Target/ThreadPlanCallUserExpression.cpp
@@ -60,6 +60,12 @@ void ThreadPlanCallUserExpression::GetDescription(
ThreadPlanCallFunction::GetDescription(s, level);
}
+void ThreadPlanCallUserExpression::DidPush() {
+ ThreadPlanCallFunction::DidPush();
+ if (m_user_expression_sp)
+ m_user_expression_sp->WillStartExecuting();
+}
+
void ThreadPlanCallUserExpression::WillPop() {
ThreadPlanCallFunction::WillPop();
if (m_user_expression_sp)
@@ -113,3 +119,8 @@ StopInfoSP ThreadPlanCallUserExpression::GetRealStopInfo() {
return stop_info_sp;
}
+
+void ThreadPlanCallUserExpression::DoTakedown(bool success) {
+ ThreadPlanCallFunction::DoTakedown(success);
+ m_user_expression_sp->DidFinishExecuting();
+}
diff --git a/contrib/llvm/tools/lldb/source/Utility/TaskPool.cpp b/contrib/llvm/tools/lldb/source/Utility/TaskPool.cpp
index 244e64fdb5fb..d8306dc7dc8f 100644
--- a/contrib/llvm/tools/lldb/source/Utility/TaskPool.cpp
+++ b/contrib/llvm/tools/lldb/source/Utility/TaskPool.cpp
@@ -73,3 +73,26 @@ void TaskPoolImpl::Worker(TaskPoolImpl *pool) {
f();
}
}
+
+void TaskMapOverInt(size_t begin, size_t end,
+ std::function<void(size_t)> const &func) {
+ std::atomic<size_t> idx{begin};
+ size_t num_workers =
+ std::min<size_t>(end, std::thread::hardware_concurrency());
+
+ auto wrapper = [&idx, end, &func]() {
+ while (true) {
+ size_t i = idx.fetch_add(1);
+ if (i >= end)
+ break;
+ func(i);
+ }
+ };
+
+ std::vector<std::future<void>> futures;
+ futures.reserve(num_workers);
+ for (size_t i = 0; i < num_workers; i++)
+ futures.push_back(TaskPool::AddTask(wrapper));
+ for (size_t i = 0; i < num_workers; i++)
+ futures[i].wait();
+}
diff --git a/contrib/llvm/tools/llvm-link/llvm-link.cpp b/contrib/llvm/tools/llvm-link/llvm-link.cpp
index 27199d53538e..568e5f8d2d58 100644
--- a/contrib/llvm/tools/llvm-link/llvm-link.cpp
+++ b/contrib/llvm/tools/llvm-link/llvm-link.cpp
@@ -300,7 +300,7 @@ static bool linkFiles(const char *argv0, LLVMContext &Context, Linker &L,
// does not do the ThinLink that would normally determine what values to
// promote.
for (auto &I : *Index) {
- for (auto &S : I.second) {
+ for (auto &S : I.second.SummaryList) {
if (GlobalValue::isLocalLinkage(S->linkage()))
S->setLinkage(GlobalValue::ExternalLinkage);
}
diff --git a/contrib/llvm/tools/llvm-lto/llvm-lto.cpp b/contrib/llvm/tools/llvm-lto/llvm-lto.cpp
index 27e5c5e122c2..2458d3d123ca 100644
--- a/contrib/llvm/tools/llvm-lto/llvm-lto.cpp
+++ b/contrib/llvm/tools/llvm-lto/llvm-lto.cpp
@@ -284,7 +284,7 @@ void printIndexStats() {
unsigned Calls = 0, Refs = 0, Functions = 0, Alias = 0, Globals = 0;
for (auto &Summaries : *Index) {
- for (auto &Summary : Summaries.second) {
+ for (auto &Summary : Summaries.second.SummaryList) {
Refs += Summary->refs().size();
if (auto *FuncSummary = dyn_cast<FunctionSummary>(Summary.get())) {
Functions++;
diff --git a/contrib/llvm/tools/llvm-pdbdump/Analyze.cpp b/contrib/llvm/tools/llvm-pdbdump/Analyze.cpp
index b65dd40d25ff..f7d6ec53b030 100644
--- a/contrib/llvm/tools/llvm-pdbdump/Analyze.cpp
+++ b/contrib/llvm/tools/llvm-pdbdump/Analyze.cpp
@@ -74,7 +74,7 @@ Error AnalysisStyle::dump() {
if (!Tpi)
return Tpi.takeError();
- TypeDatabase TypeDB;
+ TypeDatabase TypeDB(Tpi->getNumTypeRecords());
TypeDatabaseVisitor DBV(TypeDB);
TypeDeserializer Deserializer;
TypeVisitorCallbackPipeline Pipeline;
diff --git a/contrib/llvm/tools/llvm-pdbdump/LLVMOutputStyle.cpp b/contrib/llvm/tools/llvm-pdbdump/LLVMOutputStyle.cpp
index ec1325ff2335..2dd4ef0fb30d 100644
--- a/contrib/llvm/tools/llvm-pdbdump/LLVMOutputStyle.cpp
+++ b/contrib/llvm/tools/llvm-pdbdump/LLVMOutputStyle.cpp
@@ -39,6 +39,7 @@
#include "llvm/DebugInfo/PDB/Native/PDBFile.h"
#include "llvm/DebugInfo/PDB/Native/PublicsStream.h"
#include "llvm/DebugInfo/PDB/Native/RawError.h"
+#include "llvm/DebugInfo/PDB/Native/TpiHashing.h"
#include "llvm/DebugInfo/PDB/Native/TpiStream.h"
#include "llvm/DebugInfo/PDB/PDBExtras.h"
#include "llvm/Object/COFF.h"
@@ -609,11 +610,8 @@ Error LLVMOutputStyle::dumpTpiStream(uint32_t StreamIdx) {
VerLabel = "IPI Version";
}
- bool IsSilentDatabaseBuild = !DumpRecordBytes && !DumpRecords && !DumpTpiHash;
- if (IsSilentDatabaseBuild) {
- outs().flush();
- errs() << "Building Type Information For " << Label << "\n";
- }
+ if (!DumpRecordBytes && !DumpRecords && !DumpTpiHash)
+ return Error::success();
auto Tpi = (StreamIdx == StreamTPI) ? File.getPDBTpiStream()
: File.getPDBIpiStream();
@@ -623,38 +621,43 @@ Error LLVMOutputStyle::dumpTpiStream(uint32_t StreamIdx) {
std::unique_ptr<DictScope> StreamScope;
std::unique_ptr<ListScope> RecordScope;
- if (!IsSilentDatabaseBuild) {
- StreamScope = llvm::make_unique<DictScope>(P, Label);
- P.printNumber(VerLabel, Tpi->getTpiVersion());
- P.printNumber("Record count", Tpi->NumTypeRecords());
- }
-
- TypeDatabase &StreamDB = (StreamIdx == StreamTPI) ? TypeDB : ItemDB;
+ StreamScope = llvm::make_unique<DictScope>(P, Label);
+ P.printNumber(VerLabel, Tpi->getTpiVersion());
+ P.printNumber("Record count", Tpi->getNumTypeRecords());
- TypeDatabaseVisitor DBV(StreamDB);
- CompactTypeDumpVisitor CTDV(StreamDB, &P);
- TypeDumpVisitor TDV(TypeDB, &P, false);
- if (StreamIdx == StreamIPI)
- TDV.setItemDB(ItemDB);
- RecordBytesVisitor RBV(P);
- TypeDeserializer Deserializer;
+ Optional<TypeDatabase> &StreamDB = (StreamIdx == StreamTPI) ? TypeDB : ItemDB;
- // We always need to deserialize and add it to the type database. This is
- // true if even if we're not dumping anything, because we could need the
- // type database for the purposes of dumping symbols.
- TypeVisitorCallbackPipeline Pipeline;
- Pipeline.addCallbackToPipeline(Deserializer);
- Pipeline.addCallbackToPipeline(DBV);
+ std::vector<std::unique_ptr<TypeVisitorCallbacks>> Visitors;
+ Visitors.push_back(make_unique<TypeDeserializer>());
+ if (!StreamDB.hasValue()) {
+ StreamDB.emplace(Tpi->getNumTypeRecords());
+ Visitors.push_back(make_unique<TypeDatabaseVisitor>(*StreamDB));
+ }
// If we're in dump mode, add a dumper with the appropriate detail level.
if (DumpRecords) {
+ std::unique_ptr<TypeVisitorCallbacks> Dumper;
if (opts::raw::CompactRecords)
- Pipeline.addCallbackToPipeline(CTDV);
- else
- Pipeline.addCallbackToPipeline(TDV);
+ Dumper = make_unique<CompactTypeDumpVisitor>(*StreamDB, &P);
+ else {
+ assert(TypeDB.hasValue());
+
+ auto X = make_unique<TypeDumpVisitor>(*TypeDB, &P, false);
+ if (StreamIdx == StreamIPI)
+ X->setItemDB(*ItemDB);
+ Dumper = std::move(X);
+ }
+ Visitors.push_back(std::move(Dumper));
}
if (DumpRecordBytes)
- Pipeline.addCallbackToPipeline(RBV);
+ Visitors.push_back(make_unique<RecordBytesVisitor>(P));
+
+ // We always need to deserialize and add it to the type database. This is
+ // true if even if we're not dumping anything, because we could need the
+ // type database for the purposes of dumping symbols.
+ TypeVisitorCallbackPipeline Pipeline;
+ for (const auto &V : Visitors)
+ Pipeline.addCallbackToPipeline(*V);
CVTypeVisitor Visitor(Pipeline);
@@ -680,7 +683,7 @@ Error LLVMOutputStyle::dumpTpiStream(uint32_t StreamIdx) {
if (DumpTpiHash) {
DictScope DD(P, "Hash");
- P.printNumber("Number of Hash Buckets", Tpi->NumHashBuckets());
+ P.printNumber("Number of Hash Buckets", Tpi->getNumHashBuckets());
P.printNumber("Hash Key Size", Tpi->getHashKeySize());
P.printList("Values", Tpi->getHashValues());
@@ -700,19 +703,51 @@ Error LLVMOutputStyle::dumpTpiStream(uint32_t StreamIdx) {
}
}
- if (!IsSilentDatabaseBuild) {
- ListScope L(P, "TypeIndexOffsets");
- for (const auto &IO : Tpi->getTypeIndexOffsets()) {
- P.printString(formatv("Index: {0:x}, Offset: {1:N}", IO.Type.getIndex(),
- (uint32_t)IO.Offset)
- .str());
- }
+ ListScope L(P, "TypeIndexOffsets");
+ for (const auto &IO : Tpi->getTypeIndexOffsets()) {
+ P.printString(formatv("Index: {0:x}, Offset: {1:N}", IO.Type.getIndex(),
+ (uint32_t)IO.Offset)
+ .str());
}
P.flush();
return Error::success();
}
+Error LLVMOutputStyle::buildTypeDatabase(uint32_t SN) {
+ assert(SN == StreamIPI || SN == StreamTPI);
+
+ auto &DB = (SN == StreamIPI) ? ItemDB : TypeDB;
+
+ if (DB.hasValue())
+ return Error::success();
+
+ auto Tpi =
+ (SN == StreamTPI) ? File.getPDBTpiStream() : File.getPDBIpiStream();
+
+ if (!Tpi)
+ return Tpi.takeError();
+
+ DB.emplace(Tpi->getNumTypeRecords());
+
+ TypeVisitorCallbackPipeline Pipeline;
+ TypeDeserializer Deserializer;
+ TypeDatabaseVisitor DBV(*DB);
+ Pipeline.addCallbackToPipeline(Deserializer);
+ Pipeline.addCallbackToPipeline(DBV);
+
+ auto HashValues = Tpi->getHashValues();
+ std::unique_ptr<TpiHashVerifier> HashVerifier;
+ if (!HashValues.empty()) {
+ HashVerifier =
+ make_unique<TpiHashVerifier>(HashValues, Tpi->getNumHashBuckets());
+ Pipeline.addCallbackToPipeline(*HashVerifier);
+ }
+
+ CVTypeVisitor Visitor(Pipeline);
+ return Visitor.visitTypeStream(Tpi->types(nullptr));
+}
+
Error LLVMOutputStyle::dumpDbiStream() {
bool DumpModules = opts::raw::DumpModules || opts::raw::DumpModuleSyms ||
opts::raw::DumpModuleFiles || opts::raw::DumpLineInfo;
@@ -750,43 +785,46 @@ Error LLVMOutputStyle::dumpDbiStream() {
if (DumpModules) {
ListScope L(P, "Modules");
- for (auto &Modi : DS->modules()) {
+ const DbiModuleList &Modules = DS->modules();
+ for (uint32_t I = 0; I < Modules.getModuleCount(); ++I) {
+ const DbiModuleDescriptor &Modi = Modules.getModuleDescriptor(I);
DictScope DD(P);
- P.printString("Name", Modi.Info.getModuleName().str());
- P.printNumber("Debug Stream Index", Modi.Info.getModuleStreamIndex());
- P.printString("Object File Name", Modi.Info.getObjFileName().str());
- P.printNumber("Num Files", Modi.Info.getNumberOfFiles());
- P.printNumber("Source File Name Idx", Modi.Info.getSourceFileNameIndex());
- P.printNumber("Pdb File Name Idx", Modi.Info.getPdbFilePathNameIndex());
- P.printNumber("Line Info Byte Size", Modi.Info.getC11LineInfoByteSize());
- P.printNumber("C13 Line Info Byte Size",
- Modi.Info.getC13LineInfoByteSize());
- P.printNumber("Symbol Byte Size", Modi.Info.getSymbolDebugInfoByteSize());
- P.printNumber("Type Server Index", Modi.Info.getTypeServerIndex());
- P.printBoolean("Has EC Info", Modi.Info.hasECInfo());
+ P.printString("Name", Modi.getModuleName().str());
+ P.printNumber("Debug Stream Index", Modi.getModuleStreamIndex());
+ P.printString("Object File Name", Modi.getObjFileName().str());
+ P.printNumber("Num Files", Modi.getNumberOfFiles());
+ P.printNumber("Source File Name Idx", Modi.getSourceFileNameIndex());
+ P.printNumber("Pdb File Name Idx", Modi.getPdbFilePathNameIndex());
+ P.printNumber("Line Info Byte Size", Modi.getC11LineInfoByteSize());
+ P.printNumber("C13 Line Info Byte Size", Modi.getC13LineInfoByteSize());
+ P.printNumber("Symbol Byte Size", Modi.getSymbolDebugInfoByteSize());
+ P.printNumber("Type Server Index", Modi.getTypeServerIndex());
+ P.printBoolean("Has EC Info", Modi.hasECInfo());
if (opts::raw::DumpModuleFiles) {
- std::string FileListName =
- to_string(Modi.SourceFiles.size()) + " Contributing Source Files";
+ std::string FileListName = to_string(Modules.getSourceFileCount(I)) +
+ " Contributing Source Files";
ListScope LL(P, FileListName);
- for (auto File : Modi.SourceFiles)
- P.printString(File.str());
+ for (auto File : Modules.source_files(I))
+ P.printString(File);
}
- bool HasModuleDI =
- (Modi.Info.getModuleStreamIndex() < File.getNumStreams());
+ bool HasModuleDI = (Modi.getModuleStreamIndex() < File.getNumStreams());
bool ShouldDumpSymbols =
(opts::raw::DumpModuleSyms || opts::raw::DumpSymRecordBytes);
if (HasModuleDI && (ShouldDumpSymbols || opts::raw::DumpLineInfo)) {
auto ModStreamData = MappedBlockStream::createIndexedStream(
File.getMsfLayout(), File.getMsfBuffer(),
- Modi.Info.getModuleStreamIndex());
+ Modi.getModuleStreamIndex());
- ModuleDebugStreamRef ModS(Modi.Info, std::move(ModStreamData));
+ ModuleDebugStreamRef ModS(Modi, std::move(ModStreamData));
if (auto EC = ModS.reload())
return EC;
if (ShouldDumpSymbols) {
+ if (auto EC = buildTypeDatabase(StreamTPI))
+ return EC;
+
ListScope SS(P, "Symbols");
- codeview::CVSymbolDumper SD(P, TypeDB, nullptr, false);
+ codeview::CVSymbolDumper SD(P, *TypeDB, nullptr, false);
bool HadError = false;
for (auto S : ModS.symbols(&HadError)) {
DictScope LL(P, "");
@@ -807,8 +845,10 @@ Error LLVMOutputStyle::dumpDbiStream() {
}
if (opts::raw::DumpLineInfo) {
ListScope SS(P, "LineInfo");
+ if (auto EC = buildTypeDatabase(StreamIPI))
+ return EC;
- C13RawVisitor V(P, File, ItemDB);
+ C13RawVisitor V(P, File, *ItemDB);
if (auto EC = codeview::visitModuleDebugFragments(
ModS.linesAndChecksums(), V))
return EC;
@@ -846,9 +886,10 @@ Error LLVMOutputStyle::dumpSectionContribs() {
{
DictScope DD(P, "Module");
P.printNumber("Index", SC.Imod);
- auto M = DS.modules();
- if (M.size() > SC.Imod) {
- P.printString("Name", M[SC.Imod].Info.getModuleName());
+ const DbiModuleList &Modules = DS.modules();
+ if (Modules.getModuleCount() > SC.Imod) {
+ P.printString("Name",
+ Modules.getModuleDescriptor(SC.Imod).getModuleName());
}
}
P.printNumber("Data CRC", SC.DataCrc);
@@ -925,7 +966,10 @@ Error LLVMOutputStyle::dumpPublicsStream() {
P.printList("Section Offsets", Publics->getSectionOffsets(),
printSectionOffset);
ListScope L(P, "Symbols");
- codeview::CVSymbolDumper SD(P, TypeDB, nullptr, false);
+ if (auto EC = buildTypeDatabase(StreamTPI))
+ return EC;
+
+ codeview::CVSymbolDumper SD(P, *TypeDB, nullptr, false);
bool HadError = false;
for (auto S : Publics->getSymbols(&HadError)) {
DictScope DD(P, "");
diff --git a/contrib/llvm/tools/llvm-pdbdump/LLVMOutputStyle.h b/contrib/llvm/tools/llvm-pdbdump/LLVMOutputStyle.h
index bfff3b8308db..b0e7e3406b36 100644
--- a/contrib/llvm/tools/llvm-pdbdump/LLVMOutputStyle.h
+++ b/contrib/llvm/tools/llvm-pdbdump/LLVMOutputStyle.h
@@ -12,6 +12,7 @@
#include "OutputStyle.h"
+#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/DebugInfo/CodeView/TypeDatabase.h"
#include "llvm/Support/ScopedPrinter.h"
@@ -28,6 +29,8 @@ public:
Error dump() override;
private:
+ Error buildTypeDatabase(uint32_t SN);
+
Error dumpFileHeaders();
Error dumpStreamSummary();
Error dumpFreePageMap();
@@ -51,8 +54,8 @@ private:
PDBFile &File;
ScopedPrinter P;
- codeview::TypeDatabase TypeDB;
- codeview::TypeDatabase ItemDB;
+ Optional<codeview::TypeDatabase> TypeDB;
+ Optional<codeview::TypeDatabase> ItemDB;
SmallVector<std::string, 32> StreamPurposes;
};
}
diff --git a/contrib/llvm/tools/llvm-pdbdump/StreamUtil.cpp b/contrib/llvm/tools/llvm-pdbdump/StreamUtil.cpp
index 6577702adac8..81aa256b5002 100644
--- a/contrib/llvm/tools/llvm-pdbdump/StreamUtil.cpp
+++ b/contrib/llvm/tools/llvm-pdbdump/StreamUtil.cpp
@@ -12,6 +12,7 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/DebugInfo/PDB/Native/DbiModuleDescriptor.h"
+#include "llvm/DebugInfo/PDB/Native/DbiModuleList.h"
#include "llvm/DebugInfo/PDB/Native/DbiStream.h"
#include "llvm/DebugInfo/PDB/Native/InfoStream.h"
#include "llvm/DebugInfo/PDB/Native/PDBFile.h"
@@ -30,14 +31,16 @@ void discoverStreamPurposes(PDBFile &File,
auto Info = File.getPDBInfoStream();
uint32_t StreamCount = File.getNumStreams();
- DenseMap<uint16_t, const ModuleInfoEx *> ModStreams;
+ DenseMap<uint16_t, DbiModuleDescriptor> ModStreams;
DenseMap<uint16_t, std::string> NamedStreams;
if (Dbi) {
- for (auto &ModI : Dbi->modules()) {
- uint16_t SN = ModI.Info.getModuleStreamIndex();
+ const DbiModuleList &Modules = Dbi->modules();
+ for (uint32_t I = 0; I < Modules.getModuleCount(); ++I) {
+ DbiModuleDescriptor Descriptor = Modules.getModuleDescriptor(I);
+ uint16_t SN = Descriptor.getModuleStreamIndex();
if (SN != kInvalidStreamIndex)
- ModStreams[SN] = &ModI;
+ ModStreams[SN] = Descriptor;
}
}
if (Info) {
@@ -109,7 +112,7 @@ void discoverStreamPurposes(PDBFile &File,
auto NSIter = NamedStreams.find(StreamIdx);
if (ModIter != ModStreams.end()) {
Value = "Module \"";
- Value += ModIter->second->Info.getModuleName().str();
+ Value += ModIter->second.getModuleName();
Value += "\"";
} else if (NSIter != NamedStreams.end()) {
Value = "Named Stream \"";
diff --git a/contrib/llvm/tools/llvm-pdbdump/YAMLOutputStyle.cpp b/contrib/llvm/tools/llvm-pdbdump/YAMLOutputStyle.cpp
index b94b5a4abf37..0573b23cdc76 100644
--- a/contrib/llvm/tools/llvm-pdbdump/YAMLOutputStyle.cpp
+++ b/contrib/llvm/tools/llvm-pdbdump/YAMLOutputStyle.cpp
@@ -305,23 +305,28 @@ Error YAMLOutputStyle::dumpDbiStream() {
Obj.DbiStream->PdbDllVersion = DS.getPdbDllVersion();
Obj.DbiStream->VerHeader = DS.getDbiVersion();
if (opts::pdb2yaml::DbiModuleInfo) {
- for (const auto &MI : DS.modules()) {
+ const auto &Modules = DS.modules();
+ for (uint32_t I = 0; I < Modules.getModuleCount(); ++I) {
+ DbiModuleDescriptor MI = Modules.getModuleDescriptor(I);
+
Obj.DbiStream->ModInfos.emplace_back();
yaml::PdbDbiModuleInfo &DMI = Obj.DbiStream->ModInfos.back();
- DMI.Mod = MI.Info.getModuleName();
- DMI.Obj = MI.Info.getObjFileName();
- if (opts::pdb2yaml::DbiModuleSourceFileInfo)
- DMI.SourceFiles = MI.SourceFiles;
+ DMI.Mod = MI.getModuleName();
+ DMI.Obj = MI.getObjFileName();
+ if (opts::pdb2yaml::DbiModuleSourceFileInfo) {
+ auto Files = Modules.source_files(I);
+ DMI.SourceFiles.assign(Files.begin(), Files.end());
+ }
- uint16_t ModiStream = MI.Info.getModuleStreamIndex();
+ uint16_t ModiStream = MI.getModuleStreamIndex();
if (ModiStream == kInvalidStreamIndex)
continue;
auto ModStreamData = msf::MappedBlockStream::createIndexedStream(
File.getMsfLayout(), File.getMsfBuffer(), ModiStream);
- pdb::ModuleDebugStreamRef ModS(MI.Info, std::move(ModStreamData));
+ pdb::ModuleDebugStreamRef ModS(MI, std::move(ModStreamData));
if (auto EC = ModS.reload())
return EC;
diff --git a/contrib/llvm/tools/llvm-readobj/COFFDumper.cpp b/contrib/llvm/tools/llvm-readobj/COFFDumper.cpp
index 04386875b95a..049af2c4f076 100644
--- a/contrib/llvm/tools/llvm-readobj/COFFDumper.cpp
+++ b/contrib/llvm/tools/llvm-readobj/COFFDumper.cpp
@@ -44,6 +44,7 @@
#include "llvm/Support/BinaryByteStream.h"
#include "llvm/Support/BinaryStreamReader.h"
#include "llvm/Support/COFF.h"
+#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/DataExtractor.h"
@@ -70,7 +71,7 @@ class COFFDumper : public ObjDumper {
public:
friend class COFFObjectDumpDelegate;
COFFDumper(const llvm::object::COFFObjectFile *Obj, ScopedPrinter &Writer)
- : ObjDumper(Writer), Obj(Obj), Writer(Writer) {}
+ : ObjDumper(Writer), Obj(Obj), Writer(Writer), TypeDB(100) {}
void printFileHeaders() override;
void printSections() override;
@@ -121,6 +122,10 @@ private:
uint32_t RelocOffset, uint32_t Offset,
StringRef *RelocSym = nullptr);
+ void printResourceDirectoryTable(ResourceSectionRef RSF,
+ const coff_resource_dir_table &Table,
+ StringRef Level);
+
void printBinaryBlockWithRelocs(StringRef Label, const SectionRef &Sec,
StringRef SectionContents, StringRef Block);
@@ -140,6 +145,9 @@ private:
void printDelayImportedSymbols(
const DelayImportDirectoryEntryRef &I,
iterator_range<imported_symbol_iterator> Range);
+ ErrorOr<const coff_resource_dir_entry &>
+ getResourceDirectoryTableEntry(const coff_resource_dir_table &Table,
+ uint32_t Index);
typedef DenseMap<const coff_section*, std::vector<RelocationRef> > RelocMapTy;
@@ -534,6 +542,29 @@ static const EnumEntry<uint8_t> FileChecksumKindNames[] = {
LLVM_READOBJ_ENUM_CLASS_ENT(FileChecksumKind, SHA256),
};
+static const EnumEntry<COFF::ResourceTypeID> ResourceTypeNames[]{
+ {"kRT_CURSOR (ID 1)", COFF::RID_Cursor},
+ {"kRT_BITMAP (ID 2)", COFF::RID_Bitmap},
+ {"kRT_ICON (ID 3)", COFF::RID_Icon},
+ {"kRT_MENU (ID 4)", COFF::RID_Menu},
+ {"kRT_DIALOG (ID 5)", COFF::RID_Dialog},
+ {"kRT_STRING (ID 6)", COFF::RID_String},
+ {"kRT_FONTDIR (ID 7)", COFF::RID_FontDir},
+ {"kRT_FONT (ID 8)", COFF::RID_Font},
+ {"kRT_ACCELERATOR (ID 9)", COFF::RID_Accelerator},
+ {"kRT_RCDATA (ID 10)", COFF::RID_RCData},
+ {"kRT_MESSAGETABLE (ID 11)", COFF::RID_MessageTable},
+ {"kRT_GROUP_CURSOR (ID 12)", COFF::RID_Group_Cursor},
+ {"kRT_GROUP_ICON (ID 14)", COFF::RID_Group_Icon},
+ {"kRT_VERSION (ID 16)", COFF::RID_Version},
+ {"kRT_DLGINCLUDE (ID 17)", COFF::RID_DLGInclude},
+ {"kRT_PLUGPLAY (ID 19)", COFF::RID_PlugPlay},
+ {"kRT_VXD (ID 20)", COFF::RID_VXD},
+ {"kRT_ANICURSOR (ID 21)", COFF::RID_AniCursor},
+ {"kRT_ANIICON (ID 22)", COFF::RID_AniIcon},
+ {"kRT_HTML (ID 23)", COFF::RID_HTML},
+ {"kRT_MANIFEST (ID 24)", COFF::RID_Manifest}};
+
template <typename T>
static std::error_code getSymbolAuxData(const COFFObjectFile *Obj,
COFFSymbolRef Symbol,
@@ -1503,18 +1534,76 @@ void COFFDumper::printCOFFResources() {
error(S.getContents(Ref));
if ((Name == ".rsrc") || (Name == ".rsrc$01")) {
- auto Table =
- reinterpret_cast<const coff_resource_dir_table *>(Ref.data());
- char FormattedTime[20];
- time_t TDS = time_t(Table->TimeDateStamp);
- strftime(FormattedTime, sizeof(FormattedTime), "%Y-%m-%d %H:%M:%S",
- gmtime(&TDS));
- W.printHex("Time/Date Stamp", FormattedTime, Table->TimeDateStamp);
+ ResourceSectionRef RSF(Ref);
+ auto &BaseTable = unwrapOrError(RSF.getBaseTable());
+ printResourceDirectoryTable(RSF, BaseTable, "Type");
+ }
+ if (opts::SectionData)
+ W.printBinaryBlock(Name.str() + " Data", Ref);
+ }
+}
+
+void COFFDumper::printResourceDirectoryTable(
+ ResourceSectionRef RSF, const coff_resource_dir_table &Table,
+ StringRef Level) {
+ W.printNumber("String Name Entries", Table.NumberOfNameEntries);
+ W.printNumber("ID Entries", Table.NumberOfIDEntries);
+
+ char FormattedTime[20] = {};
+ time_t TDS = time_t(Table.TimeDateStamp);
+ strftime(FormattedTime, 20, "%Y-%m-%d %H:%M:%S", gmtime(&TDS));
+
+ // Iterate through level in resource directory tree.
+ for (int i = 0; i < Table.NumberOfNameEntries + Table.NumberOfIDEntries;
+ i++) {
+ auto Entry = unwrapOrError(getResourceDirectoryTableEntry(Table, i));
+ StringRef Name;
+ SmallString<20> IDStr;
+ raw_svector_ostream OS(IDStr);
+ if (i < Table.NumberOfNameEntries) {
+ ArrayRef<UTF16> RawEntryNameString = unwrapOrError(RSF.getEntryNameString(Entry));
+ std::string EntryNameString;
+ if (!llvm::convertUTF16ToUTF8String(RawEntryNameString, EntryNameString))
+ error(object_error::parse_failed);
+ OS << ": ";
+ OS << EntryNameString;
+ } else {
+ if (Level == "Type") {
+ ScopedPrinter Printer(OS);
+ Printer.printEnum("", Entry.Identifier.ID,
+ makeArrayRef(ResourceTypeNames));
+ IDStr = IDStr.slice(0, IDStr.find_first_of(")", 0) + 1);
+ } else {
+ OS << ": (ID " << Entry.Identifier.ID << ")";
+ }
+ }
+ Name = StringRef(IDStr);
+ ListScope ResourceType(W, Level.str() + Name.str());
+ if (Entry.Offset.isSubDir()) {
+ StringRef NextLevel;
+ if (Level == "Name")
+ NextLevel = "Language";
+ else
+ NextLevel = "Name";
+ auto &NextTable = unwrapOrError(RSF.getEntrySubDir(Entry));
+ printResourceDirectoryTable(RSF, NextTable, NextLevel);
+ } else {
+ W.printHex("Time/Date Stamp", FormattedTime, Table.TimeDateStamp);
+ W.printNumber("Major Version", Table.MajorVersion);
+ W.printNumber("Minor Version", Table.MinorVersion);
}
- W.printBinaryBlock(Name.str() + " Data", Ref);
}
}
+ErrorOr<const coff_resource_dir_entry &>
+COFFDumper::getResourceDirectoryTableEntry(const coff_resource_dir_table &Table,
+ uint32_t Index) {
+ if (Index >= (uint32_t)(Table.NumberOfNameEntries + Table.NumberOfIDEntries))
+ return object_error::parse_failed;
+ auto TablePtr = reinterpret_cast<const coff_resource_dir_entry *>(&Table + 1);
+ return TablePtr[Index];
+}
+
void COFFDumper::printStackMap() const {
object::SectionRef StackMapSection;
for (auto Sec : Obj->sections()) {
@@ -1553,7 +1642,7 @@ void llvm::dumpCodeViewMergedTypes(ScopedPrinter &Writer,
TypeBuf.append(Record.begin(), Record.end());
});
- TypeDatabase TypeDB;
+ TypeDatabase TypeDB(CVTypes.records().size());
{
ListScope S(Writer, "MergedTypeStream");
CVTypeDumper CVTD(TypeDB);
@@ -1574,7 +1663,7 @@ void llvm::dumpCodeViewMergedTypes(ScopedPrinter &Writer,
{
ListScope S(Writer, "MergedIDStream");
- TypeDatabase IDDB;
+ TypeDatabase IDDB(IDTable.records().size());
CVTypeDumper CVTD(IDDB);
TypeDumpVisitor TDV(TypeDB, &Writer, opts::CodeViewSubsectionBytes);
TDV.setItemDB(IDDB);
diff --git a/contrib/llvm/tools/llvm-rtdyld/llvm-rtdyld.cpp b/contrib/llvm/tools/llvm-rtdyld/llvm-rtdyld.cpp
index 4e1caa0400f1..75345de50280 100644
--- a/contrib/llvm/tools/llvm-rtdyld/llvm-rtdyld.cpp
+++ b/contrib/llvm/tools/llvm-rtdyld/llvm-rtdyld.cpp
@@ -486,10 +486,7 @@ static int checkAllExpressions(RuntimeDyldChecker &Checker) {
return 0;
}
-static std::map<void *, uint64_t>
-applySpecificSectionMappings(RuntimeDyldChecker &Checker) {
-
- std::map<void*, uint64_t> SpecificMappings;
+void applySpecificSectionMappings(RuntimeDyldChecker &Checker) {
for (StringRef Mapping : SpecificSectionMappings) {
@@ -522,10 +519,7 @@ applySpecificSectionMappings(RuntimeDyldChecker &Checker) {
"'.");
Checker.getRTDyld().mapSectionAddress(OldAddr, NewAddr);
- SpecificMappings[OldAddr] = NewAddr;
}
-
- return SpecificMappings;
}
// Scatter sections in all directions!
@@ -554,8 +548,7 @@ static void remapSectionsAndSymbols(const llvm::Triple &TargetTriple,
// Apply any section-specific mappings that were requested on the command
// line.
- typedef std::map<void*, uint64_t> AppliedMappingsT;
- AppliedMappingsT AppliedMappings = applySpecificSectionMappings(Checker);
+ applySpecificSectionMappings(Checker);
// Keep an "already allocated" mapping of section target addresses to sizes.
// Sections whose address mappings aren't specified on the command line will
@@ -563,15 +556,19 @@ static void remapSectionsAndSymbols(const llvm::Triple &TargetTriple,
// minimum separation.
std::map<uint64_t, uint64_t> AlreadyAllocated;
- // Move the previously applied mappings into the already-allocated map.
+ // Move the previously applied mappings (whether explicitly specified on the
+ // command line, or implicitly set by RuntimeDyld) into the already-allocated
+ // map.
for (WorklistT::iterator I = Worklist.begin(), E = Worklist.end();
I != E;) {
WorklistT::iterator Tmp = I;
++I;
- AppliedMappingsT::iterator AI = AppliedMappings.find(Tmp->first);
+ auto LoadAddr = Checker.getSectionLoadAddress(Tmp->first);
- if (AI != AppliedMappings.end()) {
- AlreadyAllocated[AI->second] = Tmp->second;
+ if (LoadAddr &&
+ *LoadAddr != static_cast<uint64_t>(
+ reinterpret_cast<uintptr_t>(Tmp->first))) {
+ AlreadyAllocated[*LoadAddr] = Tmp->second;
Worklist.erase(Tmp);
}
}
diff --git a/lib/clang/include/clang/Basic/Version.inc b/lib/clang/include/clang/Basic/Version.inc
index 1d05e7d8d342..3231f0865f8b 100644
--- a/lib/clang/include/clang/Basic/Version.inc
+++ b/lib/clang/include/clang/Basic/Version.inc
@@ -8,4 +8,4 @@
#define CLANG_VENDOR "FreeBSD "
-#define SVN_REVISION "302069"
+#define SVN_REVISION "302418"
diff --git a/lib/clang/include/lld/Config/Version.inc b/lib/clang/include/lld/Config/Version.inc
index 5e9f929a3de6..bc855198f1bf 100644
--- a/lib/clang/include/lld/Config/Version.inc
+++ b/lib/clang/include/lld/Config/Version.inc
@@ -4,5 +4,5 @@
#define LLD_VERSION_STRING "5.0.0"
#define LLD_VERSION_MAJOR 5
#define LLD_VERSION_MINOR 0
-#define LLD_REVISION_STRING "302069"
+#define LLD_REVISION_STRING "302418"
#define LLD_REPOSITORY_STRING "FreeBSD"
diff --git a/lib/clang/include/llvm/Support/VCSRevision.h b/lib/clang/include/llvm/Support/VCSRevision.h
index 3869c2098258..e0782eaddcf2 100644
--- a/lib/clang/include/llvm/Support/VCSRevision.h
+++ b/lib/clang/include/llvm/Support/VCSRevision.h
@@ -1,2 +1,2 @@
/* $FreeBSD$ */
-#define LLVM_REVISION "svn-r302069"
+#define LLVM_REVISION "svn-r302418"
diff --git a/lib/clang/libllvm/Makefile b/lib/clang/libllvm/Makefile
index df43b0a1d6d1..e806c0b402bd 100644
--- a/lib/clang/libllvm/Makefile
+++ b/lib/clang/libllvm/Makefile
@@ -364,6 +364,7 @@ SRCS_EXT+= DebugInfo/PDB/GenericError.cpp
SRCS_EXT+= DebugInfo/PDB/IPDBSourceFile.cpp
SRCS_EXT+= DebugInfo/PDB/Native/DbiModuleDescriptor.cpp
SRCS_EXT+= DebugInfo/PDB/Native/DbiModuleDescriptorBuilder.cpp
+SRCS_EXT+= DebugInfo/PDB/Native/DbiModuleList.cpp
SRCS_EXT+= DebugInfo/PDB/Native/DbiStream.cpp
SRCS_EXT+= DebugInfo/PDB/Native/DbiStreamBuilder.cpp
SRCS_EXT+= DebugInfo/PDB/Native/EnumTables.cpp
@@ -731,7 +732,6 @@ SRCS_MIN+= TableGen/TGParser.cpp
SRCS_MIN+= TableGen/TableGenBackend.cpp
SRCS_MIN+= Target/AArch64/AArch64A53Fix835769.cpp
SRCS_MIN+= Target/AArch64/AArch64A57FPLoadBalancing.cpp
-SRCS_MIN+= Target/AArch64/AArch64AddressTypePromotion.cpp
SRCS_MIN+= Target/AArch64/AArch64AdvSIMDScalarPass.cpp
SRCS_MIN+= Target/AArch64/AArch64AsmPrinter.cpp
SRCS_MIN+= Target/AArch64/AArch64CleanupLocalDynamicTLSPass.cpp
diff --git a/usr.bin/clang/lld/Makefile b/usr.bin/clang/lld/Makefile
index 2f4ea3d6a80b..901992381437 100644
--- a/usr.bin/clang/lld/Makefile
+++ b/usr.bin/clang/lld/Makefile
@@ -51,6 +51,7 @@ SRCS+= lib/Core/Reproduce.cpp
SRCS+= lib/Core/Resolver.cpp
SRCS+= lib/Core/SymbolTable.cpp
SRCS+= lib/Core/TargetOptionsCommandFlags.cpp
+SRCS+= lib/Core/TaskGroup.cpp
SRCS+= tools/lld/lld.cpp
.include "${SRCTOP}/lib/clang/llvm.build.mk"